diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 31d3fbc..b483067 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,12 +17,12 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: - version: 9 + version: 9.9.0 - name: Use Node uses: actions/setup-node@v4 with: node-version: ${{ matrix.node-version }} - cache: 'pnpm' + cache: "pnpm" - run: pnpm install --frozen-lockfile --child-concurrency=10 - run: cp wrangler.toml.example wrangler.toml diff --git a/README.md b/README.md index 3bdc13c..ea860f2 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,8 @@ the target registry and setup the credentials. Right now there is some limitations with this container registry. - Pushing with docker is limited to images that have layers of maximum size 500MB. Refer to maximum request body sizes in your Workers plan. -- To circumvent that limitation, you can manually add the layer and the manifest into the R2 bucket or use a client that is able to chunk uploads in sizes less than 500MB (or the limit that you have in your Workers plan). +- To circumvent that limitation, you can either manually interact with the R2 bucket to upload the layer or take a + peek at the `./push` folder for some inspiration on how can you push big layers. - If you use `npx wrangler dev` and push to the R2 registry with docker, the R2 registry will have to buffer the request on the Worker. ## License diff --git a/package.json b/package.json index 68a95ed..88765a2 100644 --- a/package.json +++ b/package.json @@ -19,20 +19,20 @@ "@cloudflare/workers-types": "^4.20240614.0", "cross-env": "^7.0.3", "eslint": "^8.57.0", - "miniflare": "3.20240208.0", + "miniflare": "3.20240909.1", "typescript": "^5.3.3", "vitest": "^1.3.1", "vitest-environment-miniflare": "^2.14.2", - "wrangler": "^3.61.0" + "wrangler": "^3.78.4" }, "engines": { "node": ">=18" }, "author": "", "license": "Apache-2.0", - "pnpm": { - "overrides": { - "@types/node": "18.15.3" - } - } + "pnpm": { + "overrides": { + "@types/node": "18.15.3" + } + } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7157c89..fa61009 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -34,8 +34,8 @@ importers: specifier: ^8.57.0 version: 8.57.0 miniflare: - specifier: 3.20240208.0 - version: 3.20240208.0 + specifier: 3.20240909.1 + version: 3.20240909.1 typescript: specifier: ^5.3.3 version: 5.3.3 @@ -46,8 +46,8 @@ importers: specifier: ^2.14.2 version: 2.14.2(vitest@1.3.1(@types/node@18.15.3)) wrangler: - specifier: ^3.61.0 - version: 3.61.0(@cloudflare/workers-types@4.20240614.0) + specifier: ^3.78.4 + version: 3.78.5(@cloudflare/workers-types@4.20240614.0) packages: @@ -58,69 +58,43 @@ packages: '@cfworker/base64url@1.12.5': resolution: {integrity: sha512-pNLrz0D0MguzFLJisBUv+XOTkpRpRTIMI7/r2QwTWI2MR5VJ7Hysd6ug6DBWksKFy7TK3hCf+qejufdJSN5X+A==} - '@cloudflare/kv-asset-handler@0.3.3': - resolution: {integrity: sha512-wpE+WiWW2kUNwNE0xyl4CtTAs+STjGtouHGiZPGRaisGB7eXXdbvfZdOrQJQVKgTxZiNAgVgmc7fj0sUmd8zyA==} + '@cloudflare/kv-asset-handler@0.3.4': + resolution: {integrity: sha512-YLPHc8yASwjNkmcDMQMY35yiWjoKAKnhUbPRszBRS0YgH+IXtsMp61j+yTcnCE3oO2DgP0U3iejLC8FTtKDC8Q==} engines: {node: '>=16.13'} - '@cloudflare/workerd-darwin-64@1.20240208.0': - resolution: {integrity: sha512-64qjsCUz6VtjXnUex5D6dWoJDuUBRw1ps2TEVH9wGJ4ubiLVUxKhj3bzkVy0RoJ8FhaCKzJWWRyTo4yc192UTA==} + '@cloudflare/workerd-darwin-64@1.20240909.0': + resolution: {integrity: sha512-nJ8jm/6PR8DPzVb4QifNAfSdrFZXNblwIdOhLTU5FpSvFFocmzFX5WgzQagvtmcC9/ZAQyxuf7WynDNyBcoe0Q==} 
engines: {node: '>=16'} cpu: [x64] os: [darwin] - '@cloudflare/workerd-darwin-64@1.20240610.1': - resolution: {integrity: sha512-YanZ1iXgMGaUWlleB5cswSE6qbzyjQ8O7ENWZcPAcZZ6BfuL7q3CWi0t9iM1cv2qx92rRztsRTyjcfq099++XQ==} - engines: {node: '>=16'} - cpu: [x64] - os: [darwin] - - '@cloudflare/workerd-darwin-arm64@1.20240208.0': - resolution: {integrity: sha512-eVQrAV200LhwLY6JZLx3l2lDrjsTC86lqnvH+RSeM43bAcdneC6lVfykHnTaOTgYFvYQbqRkn9ICWxXj1V9L5g==} - engines: {node: '>=16'} - cpu: [arm64] - os: [darwin] - - '@cloudflare/workerd-darwin-arm64@1.20240610.1': - resolution: {integrity: sha512-bRe/y/LKjIgp3L2EHjc+CvoCzfHhf4aFTtOBkv2zW+VToNJ4KlXridndf7LvR9urfsFRRo9r4TXCssuKaU+ypQ==} + '@cloudflare/workerd-darwin-arm64@1.20240909.0': + resolution: {integrity: sha512-gJqKa811oSsoxy9xuoQn7bS0Hr1sY+o3EUORTcEnulG6Kz9NQ6nd8QNdp2Hrk2jmmSqwrNkn+a6PZkWzk6Q0Gw==} engines: {node: '>=16'} cpu: [arm64] os: [darwin] - '@cloudflare/workerd-linux-64@1.20240208.0': - resolution: {integrity: sha512-ivZ2UuCvi44j8JZ++XlQzSYajt5ptvAdwlh3WPpCcygtHXEh6SVo8QXEUOXhPbv861C0HZMYxLCaLqlpQDWB8g==} + '@cloudflare/workerd-linux-64@1.20240909.0': + resolution: {integrity: sha512-sJrmtccfMg73sZljiBpe4R+lhF58TqzqhF2pQG8HRjyxkzkM1sjpZqfEFaIkNUDqd3/Ibji49fklhPCGXljKSg==} engines: {node: '>=16'} cpu: [x64] os: [linux] - '@cloudflare/workerd-linux-64@1.20240610.1': - resolution: {integrity: sha512-2zDcadR7+Gs9SjcMXmwsMji2Xs+yASGNA2cEHDuFc4NMUup+eL1mkzxc/QzvFjyBck98e92rBjMZt2dVscpGKg==} - engines: {node: '>=16'} - cpu: [x64] - os: [linux] - - '@cloudflare/workerd-linux-arm64@1.20240208.0': - resolution: {integrity: sha512-aLfvl9kXQKbM7aLvfL0HbOt5VEgv15mEZGyFKyDldJ8+nOXH6nYPma1ccwF8BHmu8otHc420eyPr2xPKhLSJnw==} + '@cloudflare/workerd-linux-arm64@1.20240909.0': + resolution: {integrity: sha512-dTbSdceyRXPOSER+18AwYRbPQG0e/Dwl2trmfMMCETkfJhNLv1fU3FFMJPjfILijKnhTZHSnHCx0+xwHdon2fg==} engines: {node: '>=16'} cpu: [arm64] os: [linux] - '@cloudflare/workerd-linux-arm64@1.20240610.1': - resolution: {integrity: sha512-7y41rPi5xmIYJN8CY+t3RHnjLL0xx/WYmaTd/j552k1qSr02eTE2o/TGyWZmGUC+lWnwdPQJla0mXbvdqgRdQg==} - engines: {node: '>=16'} - cpu: [arm64] - os: [linux] - - '@cloudflare/workerd-windows-64@1.20240208.0': - resolution: {integrity: sha512-Y6KMukWnorsSmPx6d82IuJ4SU8sX1+2y+w1uFJ76sucSgXqUAN1fmjG+EyzRVbcbsxRGBCD9c1Pn8T1amMLEYA==} + '@cloudflare/workerd-windows-64@1.20240909.0': + resolution: {integrity: sha512-/d4BT0kcWFa7Qc0K4K9+cwVQ1qyPNKiO42JZUijlDlco+TYTPkLO3qGEohmwbfMq+BieK7JTMSgjO81ZHpA0HQ==} engines: {node: '>=16'} cpu: [x64] os: [win32] - '@cloudflare/workerd-windows-64@1.20240610.1': - resolution: {integrity: sha512-B0LyT3DB6rXHWNptnntYHPaoJIy0rXnGfeDBM3nEVV8JIsQrx8MEFn2F2jYioH1FkUVavsaqKO/zUosY3tZXVA==} - engines: {node: '>=16'} - cpu: [x64] - os: [win32] + '@cloudflare/workers-shared@0.5.3': + resolution: {integrity: sha512-Yk5Im7zsyKbzd7qi+DrL7ZJR9+bdZwq9BqZWS4muDIWA8MCUeSLsUC+C9u+jdwfPSi5It2AcQG4f0iwZr6jkkQ==} + engines: {node: '>=16.7.0'} '@cloudflare/workers-types@4.20240614.0': resolution: {integrity: sha512-fnV3uXD1Hpq5EWnY7XYb+smPcjzIoUFiZpTSV/Tk8qKL3H+w6IqcngZwXQBZ/2U/DwYkDilXHW3FfPhnyD7FZA==} @@ -757,10 +731,6 @@ packages: concat-map@0.0.1: resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} - consola@3.2.3: - resolution: {integrity: sha512-I5qxpzLv+sJhTVEoLYNcTW+bThDCPsit0vLNKShZx6rLtpilNpmmeTPaeqJb9ZE9dV3DGaeby6Vuhrw38WjeyQ==} - engines: {node: ^14.18.0 || >=16.10.0} - cookie@0.5.0: resolution: {integrity: 
sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} engines: {node: '>= 0.6'} @@ -777,6 +747,9 @@ packages: data-uri-to-buffer@2.0.2: resolution: {integrity: sha512-ND9qDTLc6diwj+Xe5cdAgVTbLVdXbtxTJRXRhli8Mowuaan+0EJOtdqJ0QCHNSSPyoXGx9HX2/VMnKeC34AChA==} + date-fns@3.6.0: + resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==} + debug@4.3.4: resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} engines: {node: '>=6.0'} @@ -1090,13 +1063,13 @@ packages: resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} engines: {node: '>=12'} - miniflare@3.20240208.0: - resolution: {integrity: sha512-NnP3MQFh2pV7iETNmJzSlMBF/KhRA+XT4A7JLCfxunadQSPbTMMgbsZo9EfLloMwHMUhZGNVot3Pvh+VnT2joQ==} + miniflare@3.20240909.1: + resolution: {integrity: sha512-tdzJFApHmqFYlpjfpqBDnsE6dHUDLHejBrNgXftLfTf/ni5NySgXKnuntCCMdRtnTpjUKmkHiusGrBCf9b1rnA==} engines: {node: '>=16.13'} hasBin: true - miniflare@3.20240610.1: - resolution: {integrity: sha512-ZkfSpBmX3nJW00yYhvF2kGvjb6f77TOimRR6+2GQvsArbwo6e0iYqLGM9aB/cnJzgFjLMvOv1qj4756iynSxJQ==} + miniflare@3.20240909.3: + resolution: {integrity: sha512-HsWMexA4m0Ti8wTjqRdg50otufgoQ/I/rL3AHxf3dI/TN8zJC/5aMApqspW6I88Lzm24C+SRKnW0nm465PStIw==} engines: {node: '>=16.13'} hasBin: true @@ -1121,9 +1094,6 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - node-fetch-native@1.6.4: - resolution: {integrity: sha512-IhOigYzAKHd244OC0JIMIUrjzctirCmPkaIfhDeGcEETWof5zKYUW7e7MYvChGWh/4CJeXEgsRyGzuF334rOOQ==} - node-forge@1.3.1: resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} engines: {node: '>= 6.13.0'} @@ -1139,6 +1109,9 @@ packages: npx-import@1.1.4: resolution: {integrity: sha512-3ShymTWOgqGyNlh5lMJAejLuIv3W1K3fbI5Ewc6YErZU3Sp0PqsNs8UIU1O8z5+KVl/Du5ag56Gza9vdorGEoA==} + ohash@1.1.4: + resolution: {integrity: sha512-FlDryZAahJmEF3VR3w1KogSEdWX3WhA5GPakFx4J81kEAiHyLMpdLLElS8n8dfNadMgAne/MywcvmogzscVt4g==} + once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} @@ -1188,8 +1161,8 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} - path-to-regexp@6.2.2: - resolution: {integrity: sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw==} + path-to-regexp@6.3.0: + resolution: {integrity: sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==} pathe@1.1.2: resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} @@ -1400,23 +1373,19 @@ packages: ufo@1.4.0: resolution: {integrity: sha512-Hhy+BhRBleFjpJ2vchUNN40qgkh0366FWJGqVLYBHev0vpHTrXSA0ryT+74UiW6KWsldNurQMKGqCm1M2zBciQ==} - ufo@1.5.3: - resolution: {integrity: sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==} + ufo@1.5.4: + resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} undici@5.28.2: resolution: {integrity: 
sha512-wh1pHJHnUeQV5Xa8/kyQhO7WFa8M34l026L5P/+2TYiakvGy5Rdc8jWZVyG7ieht/0WgJLEd3kcU5gKx+6GC8w==} engines: {node: '>=14.0'} - undici@5.28.3: - resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} - engines: {node: '>=14.0'} - undici@5.28.4: resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==} engines: {node: '>=14.0'} - unenv-nightly@1.10.0-1717606461.a117952: - resolution: {integrity: sha512-u3TfBX02WzbHTpaEfWEKwDijDSFAHcgXkayUZ+MVDrjhLFvgAJzFGTSTmwlEhwWi2exyRQey23ah9wELMM6etg==} + unenv-nightly@2.0.0-1726478054.1e87097: + resolution: {integrity: sha512-uZso8dCkGlJzWQqkyjOA5L4aUqNJl9E9oKRm03V/d+URrg6rFMJwBonlX9AAq538NxwJpPnCX0gAz0IfTxsbFQ==} uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} @@ -1502,22 +1471,17 @@ packages: engines: {node: '>=8'} hasBin: true - workerd@1.20240208.0: - resolution: {integrity: sha512-edFdwHU95Ww2SmjBvBJhbc7hhVXMEo6Y7qqSWCl6W9lGScTlCMCXd4AU3f/EGJ3P++FC+CWqu+XuAywebbKF2Q==} + workerd@1.20240909.0: + resolution: {integrity: sha512-NwuYh/Fgr/MK0H+Ht687sHl/f8tumwT5CWzYR0MZMHri8m3CIYu2IaY4tBFWoKE/tOU1Z5XjEXECa9zXY4+lwg==} engines: {node: '>=16'} hasBin: true - workerd@1.20240610.1: - resolution: {integrity: sha512-Rtut5GrsODQMh6YU43b9WZ980Wd05Ov1/ds88pT/SoetmXFBvkBzdRfiHiATv+azmGX8KveE0i/Eqzk/yI01ug==} - engines: {node: '>=16'} - hasBin: true - - wrangler@3.61.0: - resolution: {integrity: sha512-feVAp0986x9xL3Dc1zin0ZVXKaqzp7eZur7iPLnpEwjG1Xy4dkVEZ5a1LET94Iyejt1P+EX5lgGcz63H7EfzUw==} + wrangler@3.78.5: + resolution: {integrity: sha512-EqCQOuuxHCBHLSjWw7kWT/1PDSw38XUhSxPC3VnDcL7F6TukVBfHHyLFO4NYGTDDoH+G8KVK1bL1q8LXY2Rcbg==} engines: {node: '>=16.17.0'} hasBin: true peerDependencies: - '@cloudflare/workers-types': ^4.20240605.0 + '@cloudflare/workers-types': ^4.20240909.0 peerDependenciesMeta: '@cloudflare/workers-types': optional: true @@ -1577,39 +1541,29 @@ snapshots: dependencies: rfc4648: 1.5.3 - '@cloudflare/kv-asset-handler@0.3.3': + '@cloudflare/kv-asset-handler@0.3.4': dependencies: mime: 3.0.0 - '@cloudflare/workerd-darwin-64@1.20240208.0': + '@cloudflare/workerd-darwin-64@1.20240909.0': optional: true - '@cloudflare/workerd-darwin-64@1.20240610.1': + '@cloudflare/workerd-darwin-arm64@1.20240909.0': optional: true - '@cloudflare/workerd-darwin-arm64@1.20240208.0': + '@cloudflare/workerd-linux-64@1.20240909.0': optional: true - '@cloudflare/workerd-darwin-arm64@1.20240610.1': + '@cloudflare/workerd-linux-arm64@1.20240909.0': optional: true - '@cloudflare/workerd-linux-64@1.20240208.0': + '@cloudflare/workerd-windows-64@1.20240909.0': optional: true - '@cloudflare/workerd-linux-64@1.20240610.1': - optional: true - - '@cloudflare/workerd-linux-arm64@1.20240208.0': - optional: true - - '@cloudflare/workerd-linux-arm64@1.20240610.1': - optional: true - - '@cloudflare/workerd-windows-64@1.20240208.0': - optional: true - - '@cloudflare/workerd-windows-64@1.20240610.1': - optional: true + '@cloudflare/workers-shared@0.5.3': + dependencies: + mime: 3.0.0 + zod: 3.22.4 '@cloudflare/workers-types@4.20240614.0': {} @@ -2145,8 +2099,6 @@ snapshots: concat-map@0.0.1: {} - consola@3.2.3: {} - cookie@0.5.0: {} cross-env@7.0.3: @@ -2161,6 +2113,8 @@ snapshots: data-uri-to-buffer@2.0.2: {} + date-fns@3.6.0: {} + debug@4.3.4: dependencies: ms: 2.1.2 @@ -2520,18 +2474,18 @@ snapshots: mimic-fn@4.0.0: {} - miniflare@3.20240208.0: + miniflare@3.20240909.1: 
dependencies: '@cspotcode/source-map-support': 0.8.1 - acorn: 8.11.3 - acorn-walk: 8.3.2 + acorn: 8.12.0 + acorn-walk: 8.3.3 capnp-ts: 0.7.0 exit-hook: 2.2.1 glob-to-regexp: 0.4.1 stoppable: 1.1.0 - undici: 5.28.3 - workerd: 1.20240208.0 - ws: 8.16.0 + undici: 5.28.4 + workerd: 1.20240909.0 + ws: 8.17.1 youch: 3.3.3 zod: 3.22.4 transitivePeerDependencies: @@ -2539,7 +2493,7 @@ snapshots: - supports-color - utf-8-validate - miniflare@3.20240610.1: + miniflare@3.20240909.3: dependencies: '@cspotcode/source-map-support': 0.8.1 acorn: 8.12.0 @@ -2549,7 +2503,7 @@ snapshots: glob-to-regexp: 0.4.1 stoppable: 1.1.0 undici: 5.28.4 - workerd: 1.20240610.1 + workerd: 1.20240909.0 ws: 8.17.1 youch: 3.3.3 zod: 3.22.4 @@ -2577,8 +2531,6 @@ snapshots: natural-compare@1.4.0: {} - node-fetch-native@1.6.4: {} - node-forge@1.3.1: {} normalize-path@3.0.0: {} @@ -2594,6 +2546,8 @@ snapshots: semver: 7.6.0 validate-npm-package-name: 4.0.0 + ohash@1.1.4: {} + once@1.4.0: dependencies: wrappy: 1.0.2 @@ -2639,7 +2593,7 @@ snapshots: path-parse@1.0.7: {} - path-to-regexp@6.2.2: {} + path-to-regexp@6.3.0: {} pathe@1.1.2: {} @@ -2824,28 +2778,22 @@ snapshots: ufo@1.4.0: {} - ufo@1.5.3: {} + ufo@1.5.4: {} undici@5.28.2: dependencies: '@fastify/busboy': 2.1.1 - undici@5.28.3: - dependencies: - '@fastify/busboy': 2.1.1 - undici@5.28.4: dependencies: '@fastify/busboy': 2.1.1 - unenv-nightly@1.10.0-1717606461.a117952: + unenv-nightly@2.0.0-1726478054.1e87097: dependencies: - consola: 3.2.3 defu: 6.1.4 - mime: 3.0.0 - node-fetch-native: 1.6.4 + ohash: 1.1.4 pathe: 1.1.2 - ufo: 1.5.3 + ufo: 1.5.4 uri-js@4.4.1: dependencies: @@ -2937,38 +2885,33 @@ snapshots: siginfo: 2.0.0 stackback: 0.0.2 - workerd@1.20240208.0: - optionalDependencies: - '@cloudflare/workerd-darwin-64': 1.20240208.0 - '@cloudflare/workerd-darwin-arm64': 1.20240208.0 - '@cloudflare/workerd-linux-64': 1.20240208.0 - '@cloudflare/workerd-linux-arm64': 1.20240208.0 - '@cloudflare/workerd-windows-64': 1.20240208.0 - - workerd@1.20240610.1: + workerd@1.20240909.0: optionalDependencies: - '@cloudflare/workerd-darwin-64': 1.20240610.1 - '@cloudflare/workerd-darwin-arm64': 1.20240610.1 - '@cloudflare/workerd-linux-64': 1.20240610.1 - '@cloudflare/workerd-linux-arm64': 1.20240610.1 - '@cloudflare/workerd-windows-64': 1.20240610.1 + '@cloudflare/workerd-darwin-64': 1.20240909.0 + '@cloudflare/workerd-darwin-arm64': 1.20240909.0 + '@cloudflare/workerd-linux-64': 1.20240909.0 + '@cloudflare/workerd-linux-arm64': 1.20240909.0 + '@cloudflare/workerd-windows-64': 1.20240909.0 - wrangler@3.61.0(@cloudflare/workers-types@4.20240614.0): + wrangler@3.78.5(@cloudflare/workers-types@4.20240614.0): dependencies: - '@cloudflare/kv-asset-handler': 0.3.3 + '@cloudflare/kv-asset-handler': 0.3.4 + '@cloudflare/workers-shared': 0.5.3 '@esbuild-plugins/node-globals-polyfill': 0.2.3(esbuild@0.17.19) '@esbuild-plugins/node-modules-polyfill': 0.2.2(esbuild@0.17.19) blake3-wasm: 2.1.5 chokidar: 3.6.0 + date-fns: 3.6.0 esbuild: 0.17.19 - miniflare: 3.20240610.1 + miniflare: 3.20240909.3 nanoid: 3.3.7 - path-to-regexp: 6.2.2 + path-to-regexp: 6.3.0 resolve: 1.22.8 resolve.exports: 2.0.2 selfsigned: 2.4.1 source-map: 0.6.1 - unenv: unenv-nightly@1.10.0-1717606461.a117952 + unenv: unenv-nightly@2.0.0-1726478054.1e87097 + workerd: 1.20240909.0 xxhash-wasm: 1.0.2 optionalDependencies: '@cloudflare/workers-types': 4.20240614.0 diff --git a/push/.gitignore b/push/.gitignore new file mode 100644 index 0000000..e76beb6 --- /dev/null +++ b/push/.gitignore @@ -0,0 +1,171 @@ +# Based on 
https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore + +# Logs + +logs +_.log +npm-debug.log_ +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) + +report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json + +# Runtime data +.output-image + +pids +_.pid +_.seed +\*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover + +lib-cov + +# Coverage directory used by tools like istanbul + +coverage +\*.lcov + +# nyc test coverage + +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) + +.grunt + +# Bower dependency directory (https://bower.io/) + +bower_components + +# node-waf configuration + +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) + +build/Release + +# Dependency directories + +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) + +web_modules/ + +# TypeScript cache + +\*.tsbuildinfo + +# Optional npm cache directory + +.npm + +# Optional eslint cache + +.eslintcache + +# Optional stylelint cache + +.stylelintcache + +# Microbundle cache + +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history + +.node_repl_history + +# Output of 'npm pack' + +\*.tgz + +# Yarn Integrity file + +.yarn-integrity + +# dotenv environment variable files + +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) + +.cache +.parcel-cache + +# Next.js build output + +.next +out + +# Nuxt.js build / generate output + +.nuxt +dist + +# Gatsby files + +.cache/ + +# Comment in the public line in if your project uses Gatsby and not Next.js + +# https://nextjs.org/blog/next-9-1#public-directory-support + +# public + +# vuepress build output + +.vuepress/dist + +# vuepress v2.x temp and cache directory + +.temp +.cache + +# Docusaurus cache and generated files + +.docusaurus + +# Serverless directories + +.serverless/ + +# FuseBox cache + +.fusebox/ + +# DynamoDB Local files + +.dynamodb/ + +# TernJS port file + +.tern-port + +# Stores VSCode versions used for testing VSCode extensions + +.vscode-test + +# yarn v2 + +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.\* +*.tar diff --git a/push/README.md b/push/README.md new file mode 100644 index 0000000..f07efe4 --- /dev/null +++ b/push/README.md @@ -0,0 +1,57 @@ +# Push chunked images to serverless-registry + +This is a pretty simple tool that allows you to push docker images to serverless-registry +when the layers are too big. + +## How to run + +```bash +bun install +``` + +Then: + +```bash +docker tag my-image:latest $IMAGE_URI +echo $PASSWORD | USERNAME_REGISTRY= bun run index.ts $IMAGE_URI +``` + +## How does it work + +It exports the image using `docker save`, then pushes each layer to the registry. +It only supports `Basic` authentication as it's the one that serverless-registry uses. + +It's able to chunk layers depending on the header `oci-chunk-max-length` returned by the registry when the client +creates an upload. + +## Interesting output folders + +- Every \*.tar in the push folder is the exported image from docker, which is extracted into `.cache`. +- Then it's compressed to gzip and saved into `.cache`. Files that end in `-ptr` have a digest in the content that + refers to another layer in the folder. 
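+
+For orientation, a typical working tree after a run looks roughly like this (a sketch based on the paths used in
+`index.ts`; the actual tar name, digests and layer names depend on the image being pushed):
+
+```
+<image id>.tar     # output of `docker save`
+.output-image/     # the tar above, extracted
+.cache/
+  <digest>         # a layer compressed with gzip, named after the sha256 of its compressed bytes
+  <layer>-ptr      # small text file containing the digest above, used to skip recompression on reruns
+```
+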
+ +There is a few more workarounds in the code like having to use node-fetch as Bun overrides the Content-Length +header from the caller. + +This pushing tool is just a workaround on the Worker limitation in request body. + +## Pushing locally + +To push to a localhost registry you need to set the environment variable INSECURE_HTTP_PUSH=true. + +## Other options? + +If the reader is interested, there is more options or alternatives to have a faster pushing chunk tool: + +1. We could redesign this tool to run with the Docker overlay storage. The biggest con is having to run a + privileged docker container that uses https://github.com/justincormack/nsenter1 to access the Docker storage. + +2. Create a localhost proxy that understands V2 registry protocol, and chunks things accordingly. The con is that + docker has issues pushing to localhost registries. + +3. Use podman like described in this [very informative Github comment](https://github.com/cloudflare/serverless-registry/issues/42#issuecomment-2366997382). + +## Improvements + +1. Use zstd instead. +2. Have a better unit test suite. diff --git a/push/bun.lockb b/push/bun.lockb new file mode 100755 index 0000000..9b088e0 Binary files /dev/null and b/push/bun.lockb differ diff --git a/push/index.ts b/push/index.ts new file mode 100644 index 0000000..ecec389 --- /dev/null +++ b/push/index.ts @@ -0,0 +1,363 @@ +import { $, CryptoHasher, file, write } from "bun"; +import tar from "tar-fs"; +import fs from "node:fs"; + +const username = process.env["USERNAME_REGISTRY"]; +const password = fs.readFileSync(process.stdin.fd, "utf-8"); +if (!username || !password) { + console.error("username or password not defined, push might not be able to authenticate with registry"); +} + +const image = process.argv[2]; +if (image === undefined) { + console.error("usage: bun run index.ts "); + process.exit(1); +} + +// Check if the image has already been saved from Docker +const imageMetadata = (await $`docker images --format json ${image}`.json()) as { ID: string }; +const tarFile = imageMetadata.ID + ".tar"; +const imagePath = ".output-image"; +if (!(await file(tarFile).exists())) { + const output = await $`docker save ${image} --output ${tarFile}`; + + if (output.exitCode != 0) { + console.error("Error saving image", image, output.text()); + process.exit(1); + } + + const extract = tar.extract(imagePath); + + await Bun.file(tarFile) + .stream() + .pipeTo( + new WritableStream({ + write(value) { + return new Promise((res, rej) => { + extract.write(value, (err) => { + if (err) { + rej(err); + return; + } + }); + extract.once("drain", () => { + res(); + }); + }); + }, + close() { + extract.end(); + }, + }), + ); +} + +type DockerSaveConfigManifest = { + Config: string; + Layers: string[]; +}[]; + +import path from "path"; +const manifests = (await Bun.file(path.join(imagePath, "manifest.json")).json()) as DockerSaveConfigManifest; + +if (manifests.length == 0) { + console.error("unexpected manifest of length 0"); + process.exit(1); +} + +if (manifests.length > 1) { + console.warn("Manifest resolved to multiple images, picking the first one"); +} + +import plimit from "p-limit"; +const pool = plimit(5); +import zlib from "node:zlib"; +import stream from "node:stream"; +import { mkdir, rename, rm } from "node:fs/promises"; + +const cacheFolder = ".cache"; + +await mkdir(cacheFolder, { recursive: true }); + +const [manifest] = manifests; +const tasks = []; + +// Iterate through every layer, read it and compress to a file +for (const layer of manifest.Layers) { + 
tasks.push( + pool(async () => { + let layerPath = path.join(imagePath, layer); + // docker likes to put stuff in two ways: + // 1. blobs/sha256/ + // 2. /layer.tar + // + // This handles both cases. + let layerName = layer.endsWith(".tar") ? path.dirname(layer) : path.basename(layer); + + const layerCachePath = path.join(cacheFolder, layerName + "-ptr"); + { + const layerCacheGzip = file(layerCachePath); + if (await layerCacheGzip.exists()) { + const compressedDigest = await layerCacheGzip.text(); + return compressedDigest; + } + } + + const inprogressPath = path.join(cacheFolder, layerName + "-in-progress"); + + await rm(inprogressPath, { recursive: true }); + const layerCacheGzip = file(inprogressPath, {}); + + const cacheWriter = layerCacheGzip.writer(); + const hasher = new Bun.CryptoHasher("sha256"); + const gzipStream = zlib.createGzip({ level: 9 }); + gzipStream.pipe( + new stream.Writable({ + write(value, _, callback) { + hasher.update(value); + cacheWriter.write(value); + callback(); + }, + }), + ); + + await file(layerPath) + .stream() + .pipeTo( + new WritableStream({ + write(value) { + return new Promise((res, rej) => { + gzipStream.write(value, "binary", (err) => { + if (err) { + rej(err); + return; + } + res(); + }); + }); + }, + close() { + gzipStream.end(); + }, + }), + ); + + await cacheWriter.flush(); + await cacheWriter.end(); + const digest = hasher.digest("hex"); + await rename(inprogressPath, path.join(cacheFolder, digest)); + await write(layerCachePath, digest); + return digest; + }), + ); +} + +const configManifest = path.join(imagePath, manifest.Config); +const config = await file(configManifest).text(); +const configDigest = new CryptoHasher("sha256").update(config).digest("hex"); + +const compressedDigests = await Promise.all(tasks); + +const proto = process.env["INSECURE_HTTP_PUSH"] === "true" ? "http" : "https"; +if (proto === "http") { + console.error("!! Using plain HTTP !!"); +} + +const pushTasks = []; +const url = new URL(proto + "://" + image); +const imageHost = url.host; +const imageRepositoryPathParts = url.pathname.split(":"); +const imageRepositoryPath = imageRepositoryPathParts.slice(0, imageRepositoryPathParts.length - 1).join(":"); +const tag = + imageRepositoryPathParts.length > 1 ? imageRepositoryPathParts[imageRepositoryPathParts.length - 1] : "latest"; + +import fetchNode from "node-fetch"; +import { ReadableLimiter } from "./limiter"; + +const cred = `Basic ${btoa(`${username}:${password}`)}`; + +// pushLayer accepts the target digest, the stream to read from, and the total layer size. +// It will do the entire push process by itself. 
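+// Roughly, the steps below follow the registry's upload flow:
+//   1. HEAD  /v2/<name>/blobs/<digest>    -> skip the upload if the blob already exists.
+//   2. POST  /v2/<name>/blobs/uploads/    -> create an upload; the response advertises
+//                                            `oci-chunk-max-length` and a `location` to PATCH to.
+//   3. PATCH <location> (repeated)        -> send the layer in chunks no bigger than that maximum.
+//   4. PUT   <location>?digest=<digest>   -> finalize the upload once every byte has been sent.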
+async function pushLayer(layerDigest: string, readableStream: ReadableStream, totalLayerSize: number) { + const headers = new Headers({ + authorization: cred, + }); + const layerExistsURL = `${proto}://${imageHost}/v2${imageRepositoryPath}/blobs/${layerDigest}`; + const layerExistsResponse = await fetch(layerExistsURL, { + headers, + method: "HEAD", + }); + + if (!layerExistsResponse.ok && layerExistsResponse.status !== 404) { + throw new Error(`${layerExistsURL} responded ${layerExistsResponse.status}: ${await layerExistsResponse.text()}`); + } + + if (layerExistsResponse.ok) { + console.log(`${layerDigest} already exists...`); + return; + } + + const createUploadURL = `${proto}://${imageHost}/v2${imageRepositoryPath}/blobs/uploads/`; + const createUploadResponse = await fetch(createUploadURL, { + headers, + method: "POST", + }); + if (!createUploadResponse.ok) { + throw new Error( + `${createUploadURL} responded ${createUploadResponse.status}: ${await createUploadResponse.text()}`, + ); + } + + const maxChunkLength = +(createUploadResponse.headers.get("oci-chunk-max-length") ?? 500 * 1024 * 1024); + if (isNaN(maxChunkLength)) { + throw new Error(`oci-chunk-max-length header is malformed (not a number)`); + } + + const reader = readableStream.getReader(); + const uploadId = createUploadResponse.headers.get("docker-upload-uuid"); + if (uploadId === null) { + throw new Error("Docker-Upload-UUID not defined in headers"); + } + + let location = createUploadResponse.headers.get("location") ?? `/v2${imageRepositoryPath}/blobs/uploads/${uploadId}`; + const putChunkUploadURL = `${proto}://${imageHost}${location}`; + const maxToWrite = Math.min(maxChunkLength, totalLayerSize); + let end = Math.min(maxChunkLength, totalLayerSize); + let written = 0; + let previousReadable: ReadableLimiter | undefined; + let totalLayerSizeLeft = totalLayerSize; + while (totalLayerSizeLeft > 0) { + const range = `0-${Math.min(end, totalLayerSize) - 1}`; + const current = new ReadableLimiter(reader as ReadableStreamDefaultReader, maxToWrite, previousReadable); + + // we have to do fetchNode because Bun doesn't allow setting custom Content-Length. + // https://github.com/oven-sh/bun/issues/10507 + const putChunkResult = await fetchNode(putChunkUploadURL, { + method: "PATCH", + body: current, + headers: new Headers({ + "range": range, + "authorization": cred, + "content-length": `${Math.min(totalLayerSizeLeft, maxToWrite)}`, + }), + }); + if (!putChunkResult.ok) { + throw new Error( + `uploading chunk ${putChunkUploadURL} returned ${putChunkResult.status}: ${await putChunkResult.text()}`, + ); + } + + const rangeResponse = putChunkResult.headers.get("range"); + if (rangeResponse !== range) { + throw new Error(`unexpected Range header ${rangeResponse}, expected ${range}`); + } + + previousReadable = current; + totalLayerSizeLeft -= previousReadable.written; + written += previousReadable.written; + end += previousReadable.written; + location = putChunkResult.headers.get("location") ?? 
location; + if (totalLayerSizeLeft != 0) console.log(layerDigest + ":", totalLayerSizeLeft, "upload bytes left."); + } + + const range = `0-${written - 1}`; + const uploadURL = new URL(`${proto}://${imageHost}${location}`); + uploadURL.searchParams.append("digest", layerDigest); + + const response = await fetch(uploadURL.toString(), { + method: "PUT", + headers: new Headers({ + Range: range, + Authorization: cred, + }), + }); + if (!response.ok) { + throw new Error(`${uploadURL.toString()} failed with ${response.status}: ${await response.text()}`); + } + + console.log("pushed", layerDigest); +} + +const layersManifest = [] as { + readonly mediaType: "application/vnd.oci.image.layer.v1.tar+gzip"; + readonly size: number; + readonly digest: `sha256:${string}`; +}[]; + +for (const compressedDigest of compressedDigests) { + let layer = file(path.join(cacheFolder, compressedDigest)); + layersManifest.push({ + mediaType: "application/vnd.oci.image.layer.v1.tar+gzip", + size: layer.size, + digest: `sha256:${compressedDigest}`, + } as const); + tasks.push( + pool(async () => { + const maxRetries = +(process.env["MAX_RETRIES"] ?? 3); + if (isNaN(maxRetries)) throw new Error("MAX_RETRIES is not a number"); + + for (let i = 0; i < maxRetries; i++) { + const digest = `sha256:${compressedDigest}`; + const stream = layer.stream(); + try { + await pushLayer(digest, stream, layer.size); + return; + } catch (err) { + console.error(digest, "failed to upload", maxRetries - i - 1, "left...", err); + layer = file(path.join(cacheFolder, compressedDigest)); + } + } + }), + ); +} + +pushTasks.push( + pool(async () => { + await pushLayer( + `sha256:${configDigest}`, + new ReadableStream({ + pull(controller) { + controller.enqueue(config); + controller.close(); + }, + }), + config.length, + ); + }), +); + +const promises = await Promise.allSettled(pushTasks); +for (const promise of promises) { + if (promise.status === "rejected") process.exit(1); +} + +const manifestObject = { + schemaVersion: 2, + mediaType: "application/vnd.oci.image.manifest.v1+json", + config: { + mediaType: "application/vnd.oci.image.config.v1+json", + size: config.length, + digest: `sha256:${configDigest}`, + }, + layers: layersManifest, +} as const; + +const manifestUploadURL = `${proto}://${imageHost}/v2${imageRepositoryPath}/manifests/${tag}`; +const responseManifestUpload = await fetch(manifestUploadURL, { + headers: { + "authorization": cred, + "content-type": manifestObject.mediaType, + }, + body: JSON.stringify(manifestObject), + method: "PUT", +}); + +if (!responseManifestUpload.ok) { + throw new Error( + `manifest upload ${manifestUploadURL} returned ${ + responseManifestUpload.status + }: ${await responseManifestUpload.text()}`, + ); +} +console.log(manifestObject); +console.log("OK"); diff --git a/push/limiter.ts b/push/limiter.ts new file mode 100644 index 0000000..422e068 --- /dev/null +++ b/push/limiter.ts @@ -0,0 +1,59 @@ +import stream from "node:stream"; + +// ReadableLimiter is a class that limits the amount of +// data to read. It will never return more data than the configured limit. +// However, it doesn't guarantee that it reads less than the limit from the passed reader. +export class ReadableLimiter extends stream.Readable { + public written: number = 0; + private leftover: Uint8Array | undefined; + + constructor( + // reader will be used to read bytes until limit. + // it might read more than 'limit' due to Bun not supporting byob. 
+ // We workaround this by keeping track of the previousReader that the caller should pass. + private reader: ReadableStreamDefaultReader, + private limit: number, + previousReader?: ReadableLimiter, + ) { + super(); + + if (previousReader) this.leftover = previousReader.leftover; + } + + _read(): void { + if (this.limit === 0) { + this.push(null); + } + + if (this.leftover !== undefined) { + const toPushNow = this.leftover.slice(0, this.limit); + this.leftover = this.leftover.slice(this.limit); + this.push(toPushNow); + this.limit -= toPushNow.length; + this.written += toPushNow.length; + + // if no leftovers left to write from before + if (this.leftover.length == 0) { + this.leftover = undefined; + } + return; + } + + this.reader.read().then((result) => { + if (result.done) return this.push(null); + + let arr = result.value as Uint8Array; + if (arr.length > this.limit) { + const toPushNow = arr.slice(0, this.limit); + this.leftover = arr.slice(this.limit); + arr = toPushNow; + } + + if (arr.length === 0) return this.push(null); + + this.push(arr); + this.limit -= arr.length; + this.written += arr.length; + }); + } +} diff --git a/push/package.json b/push/package.json new file mode 100644 index 0000000..54940d1 --- /dev/null +++ b/push/package.json @@ -0,0 +1,17 @@ +{ + "name": "push", + "module": "index.ts", + "type": "module", + "devDependencies": { + "@types/tar-fs": "^2.0.4", + "bun-types": "latest" + }, + "peerDependencies": { + "typescript": "^5.0.0" + }, + "dependencies": { + "node-fetch": "^3.3.2", + "p-limit": "^6.1.0", + "tar-fs": "^3.0.6" + } +} diff --git a/push/tsconfig.json b/push/tsconfig.json new file mode 100644 index 0000000..1449bc3 --- /dev/null +++ b/push/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "lib": ["ESNext"], + "module": "esnext", + "target": "esnext", + "moduleResolution": "bundler", + "moduleDetection": "force", + "allowImportingTsExtensions": true, + "noEmit": true, + "composite": true, + "strict": true, + "downlevelIteration": true, + "skipLibCheck": true, + "jsx": "preserve", + "allowSyntheticDefaultImports": true, + "forceConsistentCasingInFileNames": true, + "allowJs": true, + "types": [ + "bun-types" // add Bun global + ] + } +} diff --git a/src/chunk.ts b/src/chunk.ts index 628d041..07c4da5 100644 --- a/src/chunk.ts +++ b/src/chunk.ts @@ -9,7 +9,7 @@ export const MINIMUM_CHUNK = 1024 * 1024 * 5; export const MAXIMUM_CHUNK = MINIMUM_CHUNK * 1024; // 500MB -export const MAXIMUM_CHUNK_UPLOAD_SIZE = 1000 * 1000 * 500; +export const MAXIMUM_CHUNK_UPLOAD_SIZE = 1000 * 1000 * 100; export const getHelperR2Path = (id: string): string => { return `${id}-helper`;