From 04aa614b51a6dc62ad6a05ac746fafd67541942b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 18:35:38 +0000 Subject: [PATCH] Deploy to GitHub pages --- .nojekyll | 0 404.html | 1 + CNAME | 1 + _next/data/CivD-M4DIwKkMNDuPXONW/index.json | 1 + .../crafting-containers-by-hand/cgroups.json | 1 + .../crafting-containers-by-hand/chroot.json | 1 + .../namespaces.json | 1 + .../what-are-containers.json | 1 + .../lessons/docker-features/bind-mounts.json | 1 + .../docker-features/dev-containers.json | 1 + .../networking-with-docker.json | 1 + .../lessons/docker-features/volumes.json | 1 + .../lessons/docker/docker-cli.json | 1 + .../docker/docker-images-with-docker.json | 1 + .../lessons/docker/docker-images.json | 1 + .../lessons/docker/javascript-on-docker.json | 1 + .../lessons/docker/tags.json | 1 + .../lessons/dockerfiles/a-note-on-expose.json | 1 + .../build-a-more-complicated-nodejs-app.json | 1 + .../dockerfiles/build-a-nodejs-app.json | 1 + .../dockerfiles/intro-to-dockerfiles.json | 1 + .../lessons/dockerfiles/layers.json | 1 + .../making-tiny-containers/alpine-linux.json | 1 + .../making-tiny-containers/distroless.json | 1 + ...aking-our-own-alpine-nodejs-container.json | 1 + .../multi-stage-builds.json | 1 + .../static-asset-project.json | 1 + .../docker-compose.json | 1 + .../multi-container-projects/kompose.json | 1 + .../multi-container-projects/kubernetes.json | 1 + .../lessons/welcome/introduction.json | 1 + .../lessons/welcome/set-up.json | 1 + .../lessons/wrap-up/conclusion.json | 1 + .../lessons/wrap-up/docker-alternatives.json | 1 + .../CivD-M4DIwKkMNDuPXONW/_buildManifest.js | 1 + .../CivD-M4DIwKkMNDuPXONW/_ssgManifest.js | 1 + .../chunks/framework-5429a50ba5373c56.js | 33 +++++ _next/static/chunks/main-d2ba44903cd47711.js | 1 + .../chunks/pages/_app-0bd34052de026ce4.js | 1 + .../chunks/pages/_error-5a00309fd5f4b49e.js | 1 + .../chunks/pages/index-ba23832aef4beaef.js | 1 + .../[section]/[slug]-ba225542c35827e2.js | 1 + .../chunks/polyfills-c67a75d1b6f99dc8.js | 1 + .../static/chunks/webpack-4e7214a60fad8e88.js | 1 + _next/static/css/cfac17cb37b27821.css | 13 ++ _next/static/media/fa-brands-400.5d18d427.ttf | Bin 0 -> 209128 bytes .../static/media/fa-brands-400.87587a68.woff2 | Bin 0 -> 117852 bytes .../media/fa-regular-400.3ccdbd3d.woff2 | Bin 0 -> 25392 bytes .../static/media/fa-regular-400.81482cd4.ttf | Bin 0 -> 67860 bytes .../static/media/fa-solid-900.0b0cc8a6.woff2 | Bin 0 -> 156400 bytes _next/static/media/fa-solid-900.69d3141a.ttf | Bin 0 -> 420332 bytes .../media/fa-v4compatibility.0e21f5b1.woff2 | Bin 0 -> 4792 bytes .../media/fa-v4compatibility.2c070fd2.ttf | Bin 0 -> 10832 bytes images/BRAND-WHearts.png | Bin 0 -> 229270 bytes images/apple-touch-icon.png | Bin 0 -> 14889 bytes images/author.jpg | Bin 0 -> 19799 bytes images/course-icon.png | Bin 0 -> 35157 bytes images/dev-containers.jpg | Bin 0 -> 69902 bytes images/favicon-16x16.png | Bin 0 -> 598 bytes images/favicon-32x32.png | Bin 0 -> 1493 bytes images/favicon.ico | Bin 0 -> 15406 bytes images/kubernetes1.png | Bin 0 -> 112882 bytes images/kubernetes2.png | Bin 0 -> 148824 bytes images/social-share-cover.jpg | Bin 0 -> 77290 bytes images/vscode-ui.png | Bin 0 -> 104611 bytes index.html | 1 + lessons.csv | 31 ++++ .../crafting-containers-by-hand/cgroups.html | 132 ++++++++++++++++++ .../crafting-containers-by-hand/chroot.html | 40 ++++++ .../namespaces.html | 39 ++++++ .../what-are-containers.html | 26 ++++ 
lessons/docker-features/bind-mounts.html | 23 +++ lessons/docker-features/dev-containers.html | 25 ++++ .../networking-with-docker.html | 67 +++++++++ lessons/docker-features/volumes.html | 42 ++++++ lessons/docker/docker-cli.html | 61 ++++++++ lessons/docker/docker-images-with-docker.html | 28 ++++ lessons/docker/docker-images.html | 27 ++++ lessons/docker/javascript-on-docker.html | 33 +++++ lessons/docker/tags.html | 7 + lessons/dockerfiles/a-note-on-expose.html | 4 + .../build-a-more-complicated-nodejs-app.html | 66 +++++++++ lessons/dockerfiles/build-a-nodejs-app.html | 56 ++++++++ lessons/dockerfiles/intro-to-dockerfiles.html | 32 +++++ lessons/dockerfiles/layers.html | 22 +++ .../making-tiny-containers/alpine-linux.html | 25 ++++ .../making-tiny-containers/distroless.html | 27 ++++ ...aking-our-own-alpine-nodejs-container.html | 51 +++++++ .../multi-stage-builds.html | 31 ++++ .../static-asset-project.html | 32 +++++ .../docker-compose.html | 55 ++++++++ lessons/multi-container-projects/kompose.html | 60 ++++++++ .../multi-container-projects/kubernetes.html | 28 ++++ lessons/welcome/introduction.html | 24 ++++ lessons/welcome/set-up.html | 43 ++++++ lessons/wrap-up/conclusion.html | 29 ++++ lessons/wrap-up/docker-alternatives.html | 37 +++++ 97 files changed, 1292 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 CNAME create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/index.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/cgroups.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/chroot.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/namespaces.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/what-are-containers.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/bind-mounts.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/dev-containers.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/networking-with-docker.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/volumes.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/docker-cli.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/docker-images-with-docker.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/docker-images.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/javascript-on-docker.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/tags.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/a-note-on-expose.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/build-a-more-complicated-nodejs-app.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/build-a-nodejs-app.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/intro-to-dockerfiles.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/layers.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/alpine-linux.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/distroless.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/making-our-own-alpine-nodejs-container.json create mode 100644 
_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/multi-stage-builds.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/static-asset-project.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/docker-compose.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/kompose.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/kubernetes.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/welcome/introduction.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/welcome/set-up.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/wrap-up/conclusion.json create mode 100644 _next/data/CivD-M4DIwKkMNDuPXONW/lessons/wrap-up/docker-alternatives.json create mode 100644 _next/static/CivD-M4DIwKkMNDuPXONW/_buildManifest.js create mode 100644 _next/static/CivD-M4DIwKkMNDuPXONW/_ssgManifest.js create mode 100644 _next/static/chunks/framework-5429a50ba5373c56.js create mode 100644 _next/static/chunks/main-d2ba44903cd47711.js create mode 100644 _next/static/chunks/pages/_app-0bd34052de026ce4.js create mode 100644 _next/static/chunks/pages/_error-5a00309fd5f4b49e.js create mode 100644 _next/static/chunks/pages/index-ba23832aef4beaef.js create mode 100644 _next/static/chunks/pages/lessons/[section]/[slug]-ba225542c35827e2.js create mode 100644 _next/static/chunks/polyfills-c67a75d1b6f99dc8.js create mode 100644 _next/static/chunks/webpack-4e7214a60fad8e88.js create mode 100644 _next/static/css/cfac17cb37b27821.css create mode 100644 _next/static/media/fa-brands-400.5d18d427.ttf create mode 100644 _next/static/media/fa-brands-400.87587a68.woff2 create mode 100644 _next/static/media/fa-regular-400.3ccdbd3d.woff2 create mode 100644 _next/static/media/fa-regular-400.81482cd4.ttf create mode 100644 _next/static/media/fa-solid-900.0b0cc8a6.woff2 create mode 100644 _next/static/media/fa-solid-900.69d3141a.ttf create mode 100644 _next/static/media/fa-v4compatibility.0e21f5b1.woff2 create mode 100644 _next/static/media/fa-v4compatibility.2c070fd2.ttf create mode 100755 images/BRAND-WHearts.png create mode 100644 images/apple-touch-icon.png create mode 100644 images/author.jpg create mode 100644 images/course-icon.png create mode 100644 images/dev-containers.jpg create mode 100644 images/favicon-16x16.png create mode 100644 images/favicon-32x32.png create mode 100644 images/favicon.ico create mode 100644 images/kubernetes1.png create mode 100644 images/kubernetes2.png create mode 100644 images/social-share-cover.jpg create mode 100644 images/vscode-ui.png create mode 100644 index.html create mode 100644 lessons.csv create mode 100644 lessons/crafting-containers-by-hand/cgroups.html create mode 100644 lessons/crafting-containers-by-hand/chroot.html create mode 100644 lessons/crafting-containers-by-hand/namespaces.html create mode 100644 lessons/crafting-containers-by-hand/what-are-containers.html create mode 100644 lessons/docker-features/bind-mounts.html create mode 100644 lessons/docker-features/dev-containers.html create mode 100644 lessons/docker-features/networking-with-docker.html create mode 100644 lessons/docker-features/volumes.html create mode 100644 lessons/docker/docker-cli.html create mode 100644 lessons/docker/docker-images-with-docker.html create mode 100644 lessons/docker/docker-images.html create mode 100644 lessons/docker/javascript-on-docker.html create mode 100644 lessons/docker/tags.html create mode 100644 
lessons/dockerfiles/a-note-on-expose.html create mode 100644 lessons/dockerfiles/build-a-more-complicated-nodejs-app.html create mode 100644 lessons/dockerfiles/build-a-nodejs-app.html create mode 100644 lessons/dockerfiles/intro-to-dockerfiles.html create mode 100644 lessons/dockerfiles/layers.html create mode 100644 lessons/making-tiny-containers/alpine-linux.html create mode 100644 lessons/making-tiny-containers/distroless.html create mode 100644 lessons/making-tiny-containers/making-our-own-alpine-nodejs-container.html create mode 100644 lessons/making-tiny-containers/multi-stage-builds.html create mode 100644 lessons/making-tiny-containers/static-asset-project.html create mode 100644 lessons/multi-container-projects/docker-compose.html create mode 100644 lessons/multi-container-projects/kompose.html create mode 100644 lessons/multi-container-projects/kubernetes.html create mode 100644 lessons/welcome/introduction.html create mode 100644 lessons/welcome/set-up.html create mode 100644 lessons/wrap-up/conclusion.html create mode 100644 lessons/wrap-up/docker-alternatives.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..3648ce6 --- /dev/null +++ b/404.html @@ -0,0 +1 @@ +
Okay, so now we've hidden the processes from Eve so Bob and Alice can engage in commerce in privacy and peace. So we're all good, right? They can no longer mess with each other, right? Not quite. We're almost there.
\nSo now say it's Black Friday, Boxing Day or Singles' Day (three of the biggest shopping days of the year; pick the one that makes the most sense to you) and Bob and Alice are gearing up for their biggest sales day of the year. Everything is ready to go and at 9:00AM their site suddenly goes down without warning. What happened!? They log on to their chroot'd, unshare'd shell on your server and see that the CPU is pegged at 100% and there's no more memory available to allocate! Oh no! What happened?
\nThe first explanation could be that Eve has her site running on another virtual server and simply logged on and ran a malicious script that ate up all the available resources so that Bob's and Alice's sites would go down and Eve's would be the only site left up, increasing her sales.
\nHowever, another and possibly more likely explanation is that both Bob's and Alice's sites got busy at the same time and that in and of itself consumed all the resources without any malice involved, taking down their sites and everyone else's on the server. Or perhaps Bob's site had a memory leak and that was enough to take all the resources available.
\nSuffice it to say, we still have a problem. Every isolated environment has access to all physical resources of the server. There's no isolation of physical components from these environments.
\nEnter the hero of this story: cgroups, or control groups. Google saw this same problem when building its own infrastructure and wanted to keep runaway processes from taking down entire servers, so it created the idea of cgroups: now you can say "this isolated environment only gets so much CPU, so much memory, etc., and once it's used those up it's out of luck; it won't get any more."
\nThis is a bit more difficult to accomplish but let's go ahead and give it a shot.
\n\n\ncgroups v2 is now the standard. Run
\ngrep -c cgroup /proc/mounts
in your terminal. If the number is greater than one, the system you're using is cgroups v1. Click here if you want to try to get your system from cgroups v1 to v2. As this is fairly involved, I would just suggest using a more recent version of Ubuntu as it will have cgroups v2 on it. If you want to learn cgroups v1 (which I would not suggest, as they're being phased out), the first version of this course teaches them.
\n
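A quick aside that isn't from the course itself: another common way to check which cgroup version you're on is to look at the filesystem type mounted at /sys/fs/cgroup.

```bash
# prints "cgroup2fs" on a cgroups v2 system and "tmpfs" on a cgroups v1 system
stat -fc %T /sys/fs/cgroup/
```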
cgroups, as we have said, allow you to move processes and their children into groups which then allow you to limit various aspects of them. Imagine you're running a single physical server for Google with both Maps and Gmail having virtual servers on it. If Maps ships an infinite-loop bug and it pins the CPU usage of the server to 100%, you only want Maps to go down and not Gmail just because it happens to be colocated with Maps. Let's see how to do that.
\nYou interact with cgroups via a pseudo-file system. Honestly the whole interface feels weird to me but that is what it is! Inside your #2 terminal (the non-unshared one) run cd /sys/fs/cgroup
and then run ls
. You'll see a bunch of "files" that look like cpu.max
, cgroup.procs
, and memory.high
. Each one of these represents a setting you can adjust for the cgroup. In this case, we are looking at the root cgroup: all cgroups will be children of this root cgroup. The way you make your own cgroup is by creating a folder inside of the cgroup.
# creates the cgroup\nmkdir /sys/fs/cgroup/sandbox\n\n# look at all the files created automatically\nls /sys/fs/cgroup/sandbox\n
We now have a sandbox cgroup which is a child of the root cgroup and we can put limits on it! If we wanted to create a child of sandbox, as you may have guessed, we'd just create another folder inside of sandbox.
\nLet's move our unshared environment into the cgroup. Every process belongs to exactly one cgroup. If you move a process to a cgroup, it will automatically be removed from the cgroup it was in. If we move our unshared bash process from the root cgroup to the sandbox cgroup, it will be removed from the root cgroup without you doing anything.
\n# Find your isolated bash PID, it's the bash one immediately after the unshare\nps aux\n\n# should see the process in the root cgroup\ncat /sys/fs/cgroup/cgroup.procs\n\n# puts the unshared env into the cgroup called sandbox\necho <PID> > /sys/fs/cgroup/sandbox/cgroup.procs\n\n# should see the process in the sandbox cgroup\ncat /sys/fs/cgroup/sandbox/cgroup.procs\n\n# should see the process no longer in the root cgroup - processes belong to exactly 1 cgroup\ncat /sys/fs/cgroup/cgroup.procs\n
We now have moved our unshared bash process into a cgroup. We haven't placed any limits on it yet but it's there, ready to be managed. We have a minor problem at the moment though that we need to solve.
\n# should see all the available controllers\ncat /sys/fs/cgroup/cgroup.controllers\n\n# there are no controllers yet\ncat /sys/fs/cgroup/sandbox/cgroup.controllers\n\n# there are no controllers enabled for its children\ncat /sys/fs/cgroup/cgroup.subtree_control\n
You have to enable controllers for the children and none of them are enabled at the moment. You can see the root cgroup has them all enabled, but hasn't enabled them in its subtree_control, so none are available in sandbox's controllers. Easy, right? We just add them to subtree_control, right? Yes, but one problem: you can't add new subtree_control configs while the cgroup itself has processes in it. So we're going to create another cgroup, add the rest of the processes to that one, and then enable the subtree_control configs for the root cgroup.
\n# make new cgroup for the rest of the processes, you can't modify cgroups that have processes and by default Docker doesn't include any subtree_controllers\nmkdir /sys/fs/cgroup/other-procs\n\n# see all the processes you need to move, rerun each time after you add as it may move multiple processes at once due to some being parent / child\ncat /sys/fs/cgroup/cgroup.procs\n\n# you have to do this one at a time for each process\necho <PID> > /sys/fs/cgroup/other-procs/cgroup.procs\n\n# verify all the processes have been moved\ncat /sys/fs/cgroup/cgroup.procs\n\n# notice how few files there are in sandbox before we enable the controllers\nls /sys/fs/cgroup/sandbox\n\n# add the controllers\necho "+cpuset +cpu +io +memory +hugetlb +pids +rdma" > /sys/fs/cgroup/cgroup.subtree_control\n\n# all the controllers now available\ncat /sys/fs/cgroup/sandbox/cgroup.controllers\n\n# notice how many more files there are now\nls /sys/fs/cgroup/sandbox\n
We did it! We went ahead and added all the possible controllers but normally you should add just the ones you need. If you want to learn more about what each of them does, the kernel docs are quite readable.
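As a hedged example of that advice (not a step from the course, just an illustration): if all you plan to limit is CPU, memory, and PIDs, you could enable only those controllers.

```bash
# enable only the controllers you actually intend to use for child cgroups
echo "+cpu +memory +pids" > /sys/fs/cgroup/cgroup.subtree_control

# sandbox should now list just those controllers
cat /sys/fs/cgroup/sandbox/cgroup.controllers
```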
\nLet's get a third terminal going. From your host OS (Windows or macOS or your own Linux distro, not within Docker) run another docker exec -it docker-host bash
. That way we can have #1 inside the unshared environment, #2 running our commands, and #3 giving us a visual display of what's going on with htop
, a visual tool for seeing what process, CPU cores, and memory are doing.
So let's go through three little exercises of what we can do with a cgroup. First let's make it so the unshared environment only has access to 80MB of memory instead of all of it.
\n# a cool visual representation of CPU and RAM being used\napt-get install htop\n\n# from #3 so we can watch what's happening\nhtop\n\n# run this from #1 terminal and watch it in htop to see it consume about a gig of RAM and 100% of CPU core\nyes | tr \\\\n x | head -c 1048576000 | grep n\n\n# from #2, (you can get the PID from htop) to stop the CPU from being pegged and memory from being consumed\nkill -9 <PID of yes>\n\n# should see max, so the memory is unlimited\ncat /sys/fs/cgroup/sandbox/memory.max\n\n# set the limit to 80MB of RAM (the number is 80MB in bytes)\necho 83886080 > /sys/fs/cgroup/sandbox/memory.max\n\n# from inside #1, see it limit the RAM taken up; because the RAM is limited, the CPU usage is limited\nyes | tr \\\\n x | head -c 1048576000 | grep n\n
I think this is very cool. We just made it so our unshared environment only has access to 80MB of RAM, and so even though we ran a script that does nothing but consume RAM, it was limited to consuming only 80MB of it.
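If you want to watch the limit do its work, the sandbox cgroup also exposes a couple of standard cgroups v2 accounting files (this is an extra check, not part of the original walkthrough):

```bash
# how much memory the sandbox cgroup is using right now, in bytes
cat /sys/fs/cgroup/sandbox/memory.current

# event counters; the "max" and "oom" lines tick up when the 80MB limit gets hit
cat /sys/fs/cgroup/sandbox/memory.events
```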
\nHowever, as you saw, the user inside of the container could still peg the CPU if they wanted to. Let's fix that. Let's only give them 5% of a core.
\n# inside #1 / the cgroup/unshare - this will peg one core of a CPU at 100% of the resources available, see it peg 1 CPU\nyes > /dev/null\n\n# from #2, (you can get the PID from htop) to stop the CPU from being pegged\nkill -9 <PID of yes>\n\n# from #2 this allows the cgroup to only use 5% of a CPU\necho '5000 100000' > /sys/fs/cgroup/sandbox/cpu.max\n\n# inside #1 / the cgroup/unshare - this will peg one core of a CPU at 5% since we limited it\nyes > /dev/null\n\n# from #2, to stop the CPU from being pegged, get the PID from htop\nkill -9 <PID of yes>\n
Pretty cool, right? Now, no matter how bad the code we run inside of our chroot'd, unshare'd, cgroup'd environment is, it cannot take more than 5% of a CPU core.
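For the curious, the two numbers we wrote to cpu.max are a quota and a period, both in microseconds: the cgroup may run for at most the quota out of every period, so 5000 out of 100000 works out to 5% of one core. A sketch of playing with it (not part of the original steps):

```bash
# show the current setting as "<quota> <period>" in microseconds
cat /sys/fs/cgroup/sandbox/cpu.max

# half a core would be 50000 out of every 100000; "max" as the quota means unlimited
# echo '50000 100000' > /sys/fs/cgroup/sandbox/cpu.max
```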
\nOne more demo, the dreaded fork bomb. A fork bomb is a script that forks itself into multiple processes, which then fork themselves, which then fork themselves, etc. until all resources are consumed and it crashes the computer. It can be written plainly as
\nfork() {\n fork | fork &\n}\nfork\n
but you'll see it written as :(){ :|:& };:
where :
is the name of the function instead of fork
.
So someone could run a fork bomb on our system right now and the cgroup would limit the blast radius of CPU and RAM, but creating and destroying so many processes still takes a toll on the system. What we can do to more fully prevent a fork bomb is limit how many PIDs can be active at once. Let's try that.
\n# See how many processes the cgroup has at the moment\ncat /sys/fs/cgroup/sandbox/pids.current\n\n# See how many processes the cgroup can create before being limited (max)\ncat /sys/fs/cgroup/sandbox/pids.max\n\n# set a limit so the cgroup can only run 3 processes at a time\necho 3 > /sys/fs/cgroup/sandbox/pids.max\n\n# this runs five 15-second processes that run and then stop. run this from within #2 and watch it work. now run it in #1 and watch it fail; it will have to retry several times\nfor a in $(seq 1 5); do sleep 15 & done\n\n# DO NOT RUN THIS ON YOUR COMPUTER. This is a fork bomb. If not accounted for, this would bring down your computer. However we can safely run it inside our #1 because we've limited the number of PIDs available. It will end up spawning about 100 processes total but eventually will run out of forks to fork.\n:(){ :|:& };:\n
Attack prevented! Three processes is way too few for anyone to do anything meaningful, but by limiting the max PIDs available you limit the damage that can be done. I'll be honest, this is the first time I've run a fork bomb on a computer and it's pretty exhilarating. I felt like I was in the movie Hackers. Hack the planet!
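If you're curious how many forks the kernel actually refused, that's also exposed by standard cgroups v2 files (again, an extra check beyond the lesson's steps):

```bash
# the "max" counter increments every time a fork was rejected because of pids.max
cat /sys/fs/cgroup/sandbox/pids.events

# when you're done experimenting, lift the limit again ("max" means unlimited)
echo max > /sys/fs/cgroup/sandbox/pids.max
```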
\nAnd now we can call this a container. You have handcrafted a container. A container is literally nothing more than what we just did together. There are other sorts of technologies that accompany containers, like runtimes and daemons, but the containers themselves are just a combination of chroot, namespaces, and cgroups! Using these features together, we allow Bob, Alice, and Eve to run whatever code they want and the only people they can mess with are themselves.
\nSo while this is a container in its most basic sense, we haven't broached more advanced topics like networking, deploying, bundling, or anything else that something like Docker takes care of for us. But now you know, at its most basic level, what a container is, what it does, and how you could do this yourself, though you'll be grateful that Docker does it for you. On to the next lesson!
\n","markdown":"\nOkay, so now we've hidden the processes from Eve so Bob and Alice can engage in commerce in privacy and peace. So we're all good, right? They can no longer mess each other, right? Not quite. We're almost there.\n\nSo now say it's Black Friday, Boxing Day or Singles' Day (three of the biggest shopping days in the year, pick the one that makes the most sense to you š) and Bob and Alice are gearing up for their biggest sales day of the year. Everything is ready to go and at 9:00AM their site suddenly goes down without warning. What happened!? They log on to their chroot'd, unshare'd shell on your server and see that the CPU is pegged at 100% and there's no more memory available to allocate! Oh no! What happened?\n\nThe first explanation could be that Eve has her site running on another virtual server and simple logged on and ran a malicious script that ate up all the available resources so that Bob and Alice so that their sites would go down and Eve would be the only site that was up, increasing her sales.\n\nHowever another, possibly more likely explanation is that both Bob's and Alice's sites got busy at the same time and that in-and-of-itself took all the resources without any malice involved, taking down their sites and everyone else on the server. Or perhaps Bob's site had a memory leak and that was enough to take all the resources available.\n\nSuffice to say, we still have a problem. Every isolated environment has access to all _physical_ resources of the server. There's no isolation of physical components from these environments.\n\nEnter the hero of this story: cgroups, or control groups. Google saw this same problem when building their own infrastructure and wanted to protect runaway processes from taking down entire servers and made this idea of cgroups so you can say \"this isolated environment only gets so much CPU, so much memory, etc. and once it's out of those it's out-of-luck, it won't get any more.\"\n\nThis is a bit more difficult to accomplish but let's go ahead and give it a shot.\n\n> cgroups v2 is now the standard. Run `grep -c cgroup /proc/mounts` in your terminal. If the number that is **greater than one**, the system you're using is cgroups v1. [Click here][move-to-v2] if you want to try to get your system from cgroup v1 to v2. As this is fairly involved, I would just suggest using a more recent version of Ubuntu as it will have cgroups v2 on it.\n>\n> If you want to learn cgroups v1 (which I would not suggest, they're getting phased out), [the first version of this course][v1] teaches them.\n\ncgroups as we have said allow you to move processes and their children into groups which then allow you to limit various aspects of them. Imagine you're running a single physical server for Google with both Maps and GMail having virtual servers on it. If Maps ships an infinite loop bug and it pins the CPU usage of the server to 100%, you only want Maps to go down and _not_ GMail just because it happens to be colocated with Maps. Let's see how to do that.\n\nYou interact with cgroups by a pseudo-file system. Honestly the whole interface feels weird to me but that is what it is! Inside your #2 terminal (the non-unshared one) run `cd /sys/fs/cgroup` and then run `ls`. You'll see a bunch of \"files\" that look like `cpu.max`, `cgroup.procs`, and `memory.high`. Each one of these represents a setting that you can play with with regard to the cgroup. In this case, we are looking at the root cgroup: all cgroups will be children of this root cgroup. 
I've heard people call this "cha-root" and "change root". I'm going to stick to "change root" because I feel less ridiculous saying that. It's a Linux command that allows you to set the root directory of a new process. In our container use case, we just set the root directory to be where-ever the new container's new root directory should be. And now the new container group of processes can't see anything outside of it, eliminating our security problem because the new process has no visibility outside of its new root.
\nLet's try it. Start up an Ubuntu VM however you feel most comfortable. I'll be using Docker (and doing containers within containers). If you're like me, run docker run -it --name docker-host --rm --privileged ubuntu:jammy
. This will download the official Ubuntu container from Docker Hub and grab the version marked with the jammy tag, which is the 22.04 LTS release. You could put ubuntu:devel
to get the latest development of Ubuntu (as of writing that'd be 24.04). docker run
means we're going to run some commands in the container, and the -it
means we want to make the shell interactive (so we can use it like a normal terminal.)
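For reference, here is that same command again with the remaining flags annotated (nothing new added, just comments):

```bash
# -it          interactive shell attached to a TTY, so it feels like a normal terminal
# --name       a fixed name, so later we can run `docker exec -it docker-host bash` to get more shells
# --rm         throw the container away when we exit it
# --privileged extra capabilities so we can experiment with chroot, namespaces, and cgroups inside
docker run -it --name docker-host --rm --privileged ubuntu:jammy
```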
If you're on Windows and using WSL, just open a new WSL terminal in Ubuntu.
\nTo see what version of Ubuntu you're using, run cat /etc/issue
. cat
reads a file and dumps it into the output which means we can read it, and /etc/issue
is a file that will tell us what distro we're using. Mine says Ubuntu 22.04.4 LTS \\n \\l
.
Okay, so let's attempt to use chroot
right now.
mkdir /my-new-root
.echo "my super secret thing" >> /my-new-root/secret.txt
.chroot /my-new-root bash
and see the error it gives you. You should see something about failing to run a shell or not being able to find bash. That's because bash is a program and your new root wouldn't have bash to run (because it can't reach outside of its new root.) So let's fix that! Run:
\nmkdir /my-new-root/bin
cp /bin/bash /bin/ls /my-new-root/bin/
chroot /my-new-root bash
Still not working! The problem is that these commands rely on libraries to power them and we didn't bring those with us. So let's do that too. Run ldd /bin/bash
. This will print out something like this:
$ ldd /bin/bash\n linux-vdso.so.1 (0x0000ffffbe221000)\n libtinfo.so.6 => /lib/aarch64-linux-gnu/libtinfo.so.6 (0x0000ffffbe020000)\n libc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000ffffbde70000)\n /lib/ld-linux-aarch64.so.1 (0x0000ffffbe1e8000)\n
These are the libraries we need for bash. Let's go ahead and copy those into our new environment.
\nmkdir /my-new-root/lib
cp /lib/aarch64-linux-gnu/libtinfo.so.6 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /my-new-root/lib
ls
. Run ldd /bin/ls
ls
into our my-new-root
.cp /lib/aarch64-linux-gnu/libselinux.so.1 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /lib/aarch64-linux-gnu/libpcre2-8.so.0 /my-new-root/lib
Now, finally, run chroot /my-new-root bash
and run ls
. You should successfully see everything in the directory. Now try pwd
to see your working directory. You should see /
. You can't get out of here! This, before being called containers, was called a jail for this reason. At any time, hit CTRL+D or run exit
to get out of your chrooted environment.
Now try running cat secret.txt
. Oh no! Your new chroot-ed environment doesn't know how to cat! As an exercise, go make cat
work the same way we did above!
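If you get stuck, here is one possible solution sketch. The library paths below are from my aarch64 output above; run ldd /bin/cat yourself and copy whatever it prints on your machine.

```bash
# from outside the chroot (hit CTRL+D first if you're still inside it)
cp /bin/cat /my-new-root/bin/

# see which libraries cat needs, then copy those in too
ldd /bin/cat
cp /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /my-new-root/lib/

# back inside the jail, cat should work now
chroot /my-new-root bash
cat secret.txt
```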
Congrats, you just cha-rooted the **** out of your first environment!
\n","markdown":"\nI've heard people call this \"cha-root\" and \"change root\". I'm going to stick to \"change root\" because I feel less ridiculous saying that. It's a Linux command that allows you to set the root directory of a new process. In our container use case, we just set the root directory to be where-ever the new container's new root directory should be. And now the new container group of processes can't see anything outside of it, eliminating our security problem because the new process has no visibility outside of its new root.\n\nLet's try it. Start up a Ubuntu VM however you feel most comfortable. I'll be using Docker (and doing containers within containers š¤Æ). If you're like me, run `docker run -it --name docker-host --rm --privileged ubuntu:jammy`. This will download the [official Ubuntu container][ubuntu] from Docker Hub and grab the version marked with the _jammy_ tag. In this case, _latest_ means it's the latest stable release (22.04.) You could put `ubuntu:devel` to get the latest development of Ubuntu (as of writing that'd be 24.04). `docker run` means we're going to run some commands in the container, and the `-it` means we want to make the shell interactive (so we can use it like a normal terminal.)\n\nIf you're in Windows and using WSL, just open a new WSL terminal in Ubuntu. āļø\n\nTo see what version of Ubuntu you're using, run `cat /etc/issue`. `cat` reads a file and dumps it into the output which means we can read it, and `/etc/issue` is a file that will tell us what distro we're using. Mine says `Ubuntu 22.04.4 LTS \\n \\l`.\n\nOkay, so let's attempt to use `chroot` right now.\n\n1. Make a new folder in your root directory via `mkdir /my-new-root`.\n1. Inside that new folder, run `echo \"my super secret thing\" >> /my-new-root/secret.txt`.\n1. Now try to run `chroot /my-new-root bash` and see the error it gives you.\n\nYou should see something about failing to run a shell or not being able to find bash. That's because bash is a program and your new root wouldn't have bash to run (because it can't reach outside of its new root.) So let's fix that! Run:\n\n1. `mkdir /my-new-root/bin`\n1. `cp /bin/bash /bin/ls /my-new-root/bin/`\n1. `chroot /my-new-root bash`\n\nStill not working! The problem is that these commands rely on libraries to power them and we didn't bring those with us. So let's do that too. Run `ldd /bin/bash`. This print out something like this:\n\n```bash\n$ ldd /bin/bash\n\tlinux-vdso.so.1 (0x0000ffffbe221000)\n\tlibtinfo.so.6 => /lib/aarch64-linux-gnu/libtinfo.so.6 (0x0000ffffbe020000)\n\tlibc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000ffffbde70000)\n\t/lib/ld-linux-aarch64.so.1 (0x0000ffffbe1e8000)\n```\n\nThese are the libraries we need for bash. Let's go ahead and copy those into our new environment.\n\n1. `mkdir /my-new-root/lib`\n1. Then we need to copy all those paths (ignore the lines that don't have paths) into our directory. Make sure you get the right files in the right directory. In my case above (yours likely will be different) it's:\n 1. `cp /lib/aarch64-linux-gnu/libtinfo.so.6 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /my-new-root/lib`\n1. Do it again for `ls`. Run `ldd /bin/ls`\n1. Follow the same process to copy the libraries for `ls` into our `my-new-root`.\n 1. `cp /lib/aarch64-linux-gnu/libselinux.so.1 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /lib/aarch64-linux-gnu/libpcre2-8.so.0 /my-new-root/lib`\n\nNow, finally, run `chroot /my-new-root bash` and run `ls`. 
[ubuntu]: https://hub.docker.com/_/ubuntu\n","slug":"chroot","title":"chroot","section":"Crafting Containers by Hand","icon":"hand-holding-heart","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/02-crafting-containers-by-hand/B-chroot.md","nextSlug":"/lessons/crafting-containers-by-hand/namespaces","prevSlug":"/lessons/crafting-containers-by-hand/what-are-containers"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/namespaces.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/namespaces.json new file mode 100644 index 0000000..2b8d9bd --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/crafting-containers-by-hand/namespaces.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Understand the importance of namespaces and cgroups for security and resource management in server environments. Learn how namespaces can isolate processes to enhance security and prevent unauthorized access in shared server environments, beyond what chroot alone provides.","keywords":["namespaces","cgroups","security","resource management","chroot","process isolation","server environment"]},"html":"While chroot is pretty straightforward, namespaces and cgroups are a bit more nebulous to understand but no less important. Both of these next two features are for security and resource management.
\nLet's say you're running a big server that's in your home and you're selling space to customers (that you don't know) to run their code on your server. What sort of concerns would you have about running their "untrusted" code? Let's say you have Alice and Bob who are running e-commerce services dealing with lots of money. They themselves are good citizens of the server and are minding their own business. But then you have Eve join the server who has other intentions: she wants to steal money, source code, and whatever else she can get her hands on from your other tenants on the server. If you just gave all three of them unfettered root access to the server, what's to stop Eve from taking everything? Or what if she just wants to disrupt their businesses, even if she's not stealing anything?
\nYour first line of defense is that you could log them into chroot'd environments and limit them to only those. Great! Now they can't see each others' files. Problem solved? Well, no, not quite yet. Despite the fact that she can't see the files, she can still see all the processes going on on the computer. She can kill processes, unmount filesystems, and even hijack processes.
\nEnter namespaces. Namespaces allow you to hide processes from other processes. If we give each chroot'd environment different sets of namespaces, now Alice, Bob, and Eve can't see each others' processes (they even get different process PIDs, or process IDs, so they can't guess what the others have) and you can't steal or hijack what you can't see!
\nThere's a lot more depth to namespaces beyond what I've outlined here. The above describes just the PID namespace. There are more namespaces as well and they will help these containers stay isolated from each other.
\nNow, this isn't secure. The only thing we've protected is the file system, mostly.
\ndocker exec -it docker-host bash
. This will get another terminal session #2 for us (I'll refer to the chroot'd environment as #1). Then run tail -f /my-new-root/secret.txt &
in #2. This will start an infinitely running process in the background. Now run ps
to see the process list in #2 and see the tail
process running. Copy the PID (process ID) for the tail process. In #1, the chroot'd shell, run kill <PID you just copied>
. This will kill the tail process from inside the chroot'd
environment. This is a problem because that means chroot isn't enough to isolate someone. We need more barriers. This is just one problem, processes, but it's illustrative that we need more isolation beyond just the file system. So let's create a chroot'd environment that's isolated using namespaces with a new command: unshare
. unshare
creates a new isolated namespace from its parent (so you, the server provider, can't spy on Bob or Alice either) and from all other future tenants. Run this:
NOTE: This next command downloads about 150MB and takes at least a few minutes to run. Unlike Docker images, this will redownload it every time you run it and does no caching.
\n# from our chroot'd environment if you're still running it, if not skip this\nexit\n\n# install debootstrap\napt-get update -y\napt-get install debootstrap -y\ndebootstrap --variant=minbase jammy /better-root\n\n# head into the new namespace'd, chroot'd environment\nunshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot /better-root bash # this also chroot's for us\nmount -t proc none /proc # mount /proc so process tools can see the new PID namespace\nmount -t sysfs none /sys # mount /sys for kernel and device info\nmount -t tmpfs none /tmp # mount a scratch in-memory filesystem at /tmp\n
This will create a new environment that's isolated on the system with its own PIDs, mounts (like storage and volumes), and network stack. Now we can't see any of the processes!
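A couple of quick ways to see the isolation from inside #1; these use only a bash built-in and /proc, since this minimal root filesystem has very little else installed:

```bash
# the shell believes it is one of the very first processes on the machine (PID namespace)
echo $$

# /proc lists only the handful of processes that exist inside this environment
ls /proc | grep -E '^[0-9]+$'
```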
\nNow try our previous exercise again.
\ntail -f /my-new-root/secret.txt &
from #2 (not the unshare env), then run ps
from #1, grab pid for tail
kill <pid for tail>
, and see that it doesn't work. We used namespaces to protect our processes! We could explore the other namespaces but know that it's a similar exercise: using namespaces to restrict containers from interfering with other containers (both for nefarious purposes and to protect ourselves from ourselves.)
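If you want to peek at those other namespaces, every process lists its namespace memberships under /proc. Run this from #2 on the host (an aside, not a course step):

```bash
# one symlink per namespace type: cgroup, ipc, mnt, net, pid, user, uts, ...
ls -l /proc/$$/ns
```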
\n","markdown":"\nWhile chroot is a pretty straightforward, namespaces and cgroups are a bit more nebulous to understand but no less important. Both of these next two features are for security and resource management.\n\nLet's say you're running a big server that's in your home and you're selling space to customers (that you don't know) to run their code on your server. What sort of concerns would you have about running their \"untrusted\" code? Let's say you have Alice and Bob who are running e-commerce services dealing with lots of money. They themselves are good citizens of the servers and minding their own business. But then you have Eve join the server who has other intentions: she wants to steal money, source code, and whatever else she can get her hands on from your other tenants on the server. If just gave all three them unfettered root access to server, what's to stop Eve from taking everything? Or what if she just wants to disrupt their businesses, even if she's not stealing anything?\n\nYour first line of defense is that you could log them into chroot'd environments and limit them to only those. Great! Now they can't see each others' files. Problem solved? Well, no, not quite yet. Despite the fact that she can't see the files, she can still see all the processes going on on the computer. She can kill processes, unmount filesystem and even hijack processes.\n\nEnter namespaces. Namespaces allow you to hide processes from other processes. If we give each chroot'd environment different sets of namespaces, now Alice, Bob, and Eve can't see each others' processes (they even get different process PIDs, or process IDs, so they can't guess what the others have) and you can't steal or hijack what you can't see!\n\nThere's a lot more depth to namespaces beyond what I've outlined here. The above is describing _just_ the PID namespace. There are more namespaces as well and this will help these containers stay isloated from each other.\n\n## The problem with chroot alone\n\nNow, this isn't secure. The only thing we've protected is the file system, mostly.\n\n1. chroot in a terminal into our environment\n1. In another terminal, run `docker exec -it docker-host bash`. This will get another terminal session #2 for us (I'll refer to the chroot'd environment as #1)\n1. Run `tail -f /my-new-root/secret.txt &` in #2. This will start an infinitely running process in the background.\n1. Run `ps` to see the process list in #2 and see the `tail` process running. Copy the PID (process ID) for the tail process.\n1. In #1, the chroot'd shell, run `killContainers are probably simpler than you think they are. Before I took a deep dive into what they are, I was very intimidated by the concept of what containers were. I thought they were for one super-versed in Linux and sysadmin type activties. In reality, the core of what containers are is just a few features of the Linux kernel duct-taped together. Honestly, there's no single concept of a "container": it's just using a few features of Linux together to achieve isolation. That's it.
\nSo how comfortable are you with the command line? This course doesn't assume wizardry with bash or zsh but this probably shouldn't be your first adventure with it. If it is, check out my course on the command line and Linux. That course will give you more than you'll need to keep up with this one.
\nLet's start with why first, why we need containers.
\nHistorically, if you wanted to run a web server, you either set up your own or you rented a literal server somewhere. We often call this "bare metal" because, well, your code is literally executing on the processor with no abstraction. This is great if you're extremely performance sensitive and you have ample and competent staffing to take care of these servers.
\nThe problem with running your servers on bare metal is that you become extremely inflexible. Need to spin up another server? Call up Dell or IBM and ask them to ship you another one, then get your tech to go install the physical server, set it up, and bring it into the server farm. That only takes a month or two, right? Pretty much instant.
\nOkay, so now at least you have a pool of servers responding to web traffic. Now you just have to worry about keeping the operating system up to date. Oh, and all the drivers connecting to the hardware. And all the software running on the server. And replacing the components of your server as new ones come out. Or maybe the whole server. And fixing failed components. And network issues. And running cables. And your power bill. And who has physical access to your server room. And the actual temperature of the data center. And paying a ridiculous Internet bill. You get the point. Managing your own servers is its own set of challenges and requires a whole team to do it.
\nVirtual machines are the next step. This is adding a layer of abstraction between you and the metal. Now instead of having one instance of Linux running on your computer, you'll have multiple guest instances of Linux running inside of a host instance of Linux (it doesn't have to be Linux but I'm using it to be illustrative.) Why is this helpful? For one, I can have one beefy server and have it spin up and down virtual servers at will. So now if I'm adding a new service, I can just spin up a new VM on one of my servers (providing I have space to do so.) This allows a lot more flexibility.
\nAnother thing is that I can totally separate two VMs from each other on the same machine. This affords a few nice things.
\nSo enter VMs. These are individual instances of operating systems that, as far as the OSes know, are running on bare metal themselves. The host operating system offers the VM a certain amount of resources and if that VM runs out, it runs out, and it doesn't affect the other guest operating systems running on the server. If someone else crashes their server, they crash their guest OS and yours hums along unaffected. And since they're in a guest OS, they can't peek into your files because their VM has no concept of any sibling VMs on the machine, so it's much more secure.
\nAll these above features come at the cost of a bit of performance. Running an operating system within an operating system isn't free. But in general we have enough computing power and memory that this isn't the primary concern. And of course, with abstraction comes ease at the cost of additional complexity. In this case, the advantages very much outweigh the cost most of the time.
\nSo, as alluded to above, you can nab a VM from a public cloud provider like Microsoft Azure or Amazon Web Services. It will come with a pre-allocated amount of memory and computing power (often called virtual cores or vCores because they're dedicated cores for your virtual machine.) Now you no longer have to manage the expensive and difficult business of maintaining a data center, but you do still have to manage all the software of it yourself: Microsoft won't update Ubuntu for you (generally speaking, they might prompt you but you still have to worry about it) but they will make sure the hardware is up to date.
\nBut now you have the great ability to spin up and spin down virtual machines in the cloud, giving you access to resources with the only upper bound being how much you're willing to pay. And we've been doing this for a while. But the hard part is that they're still just giving you machines: you have to manage all the software, networking, provisioning, updating, etc. for all these servers. And lots of companies still do! Tools like Terraform, Chef, Puppet, Salt, etc. help a lot with things like this because they can make spinning up new VMs easy since they handle the software needed to get them going.
\nWe're still paying the cost of running a whole operating system in the cloud inside of a host operating system. It'd be nice if we could just run the code inside the host OS without the additional expenditure of guest OSs.
\nAnd here we are, containers. As you may have divined, containers give us many of the security and resource-management features of VMs but without the cost of having to run a whole other operating system. Instead, they use chroot, namespaces, and cgroups to separate a group of processes from each other. If this sounds a little flimsy to you and you're still worried about security and resource management, you're not alone. But I assure you a lot of very smart people have worked out the kinks and containers are the future of deploying code.
\nSo now that we've been through why we need containers, let's go through the three things that make containers a reality.
\n","markdown":"\nContainers are probably simpler than you think they are. Before I took a deep dive into what they are, I was very intimidated by the concept of what containers were. I thought they were for one super-versed in Linux and sysadmin type activties. In reality, the core of what containers are is just a few features of the Linux kernel duct-taped together. Honestly, there's no single concept of a \"container\": it's just using a few features of Linux together to achieve isolation. That's it.\n\nSo how comfortable are you with the command line? This course doesn't assume wizardry with bash or zsh but this probably shouldn't be your first adventure with it. If it is, [check out my course on the command line and Linux][linux]. This course will give you more than we'll need to keep up with this course.\n\n## Why Containers\n\nLet's start with why first, why we need containers.\n\n### Bare Metal\n\nHistorically, if you wanted to run a web server, you either set up your own or you rented a literal server somewhere. We often call this \"bare metal\" because, well, your code is literally executing on the processor with no abstraction. This is great if you're extremely performance sensitive and you have ample and competent staffing to take care of these servers.\n\nThe problem with running your servers on the bare metal is you come become extremely inflexible. Need to spin up another server? Call up Dell or IBM and ask them to ship you another one, then get your tech to go install the physical server, set up the server, and bring into the server farm. That only takes a month or two right? Pretty much instant. š \n\nOkay, so now at least you have a pool of servers responding to web traffic. Now you just to worry about keeping the operating system up to date. Oh, and all the drivers connecting to the hardware. And all the software running on the server. And replacing the components of your server as new ones come out. Or maybe the whole server. And fixing failed components. And network issues. And running cables. And your power bill. And who has physical access to your server room. And the actual temperature of the data center. And paying a ridiculous Internet bill. You get the point. Managing your own servers is its own set of challenges and requires a whole team to do it.\n\n### Virtual Machines\n\nVirtual machines are the next step. This is adding a layer of abstraction between you and the metal. Now instead of having one instance of Linux running on your computer, you'll have multiple guest instances of Linux running inside of a host instance of Linux (it doesn't have to be Linux but I'm using it to be illustrative.) Why is this helpful? For one, I can have one beefy server and have it spin up and down virtual servers at will. So now if I'm adding a new service, I can just spin up a new VM on one of my servers (providing I have space to do so.) This allows a lot more flexibility.\n\nAnother thing is I can separate two VMs from each other on the same machine _totally_ from each other. This affords a few nice things.\n\n1. Imagine both Coca-Cola and Pepsi lease a server from Microsoft to power their soda making machines and hence have the recipe on the server. Microsoft, wanting to be effecient, buys large physical servers and then allocates virtual servers to each of them. If Microsoft puts both of these virtual servers on the same physical server with no separation, one soda-maker could just connect into the server and browse the competitor's files and find the secret recipe. 
So this is a massive security problem.\n1. Imagine one of the soda-makers discovers that they're on the same server as their competitor. They could drop a [fork bomb][fork-bomb] to devour all the resources their competitors' website was using and intentionally crash the server.\n1. Much less nefariously, imagine an engineer at Coca-Cola shipped a bug that crashed the whole server. If there's no separation between the two virtual servers, his shipping a bug would also crash Pepsi's website, something they wouldn't be super happy about.\n\nSo enter VMs. These are individual instances of operating systems that as far as the OSes know, are running on bare metal themselves. The host operating system offers the VM a certain amount resources and if that VM runs out, they run out and they don't affect other guest operating systems running on the server. If someone else crashes their server, they crash their guest OS and yours hums along unaffected. And since they're in a guest OS, they can't peek into your files because their VM has no concept of any sibling VMs on the machine so it's much more secure.\n\nAll these above features come at the cost of a bit of performance. Running an operating system within an operating system isn't free. But in general we have enough computing power and memory that this isn't the primary concern. And of course, with abstraction comes ease at the cost of additional complexity. In this case, the advantages very much outweigh the cost most of the time.\n\n### Public Cloud\n\nSo, as alluded to above, you can nab a VM from a public cloud provider like Microsoft Azure or Amazon Web Services. It will come with a pre-allocated amount of memory and computing power (often called virtual cores or vCores because their dedicated cores to your virutal machine.) Now you no longer have to manage the expensive and difficult business of maintaining a data center but you do have to still manage all the software of it yourself: Microsoft won't update Ubuntu for you (generally speaking, they might prompt you but you still have to worry about it) but they will make sure the hardware is up to date.\n\nBut now you have the great ability spin up and spin down virtual machines in the cloud, giving you access to resources with the only upper bound being how much you're willing to pay. And we've been doing this for a while. But the hard part is they're still just giving you machines, you have to manage all the software, networking, provisioning, updating, etc. for all these servers. And lots of companies still do! Tools like Terraform, Chef, Puppet, Salt, etc. help a lot with things like this because they can make spinning up new VMs easy because they can handle the software needed to get it going.\n\nWe're still paying the cost of running a whole operating system in the cloud inside of a host operating system. It'd be nice if we could just run the code inside the host OS without the additional expenditure of guest OSs.\n\n### Containers\n\nAnd here we are, containers. As you may have divined, containers give us many of the security and resource-management features of VMs but without the cost of having to run a whole other operating system. It instead usings chroot, namespace, and cgroup to separate a group of processes from each other. If this sounds a little flimsy to you and you're still worried about security and resource-management, you're not alone. 
But I assure you a lot of very smart people have worked out the kinks and containers are the future of deploying code.\n\nSo now that we've been through why we need containers, let's go through the three things that make containers a reality.\n\n[fork-bomb]: https://en.wikipedia.org/wiki/Fork_bomb\n[linux]: https://frontendmasters.com/courses/linux-command-line/\n","slug":"what-are-containers","title":"What Are Containers","section":"Crafting Containers by Hand","icon":"hand-holding-heart","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/02-crafting-containers-by-hand/A-what-are-containers.md","nextSlug":"/lessons/crafting-containers-by-hand/chroot","prevSlug":"/lessons/welcome/set-up"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/bind-mounts.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/bind-mounts.json new file mode 100644 index 0000000..064767e --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/bind-mounts.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn about using volumes and bind mounts in Docker containers to handle stateful operations, along with the differences between them. Bind mounts enable flexible file access between the host computer and container, offering a practical solution for testing or development scenarios.","keywords":["Docker","volumes","bind mounts","stateful containers","containerization","NGINX","Dockerfile"]},"html":"So far we've been dealing with self-contained containers. Normally this is all you ever want: containers that can spin up and spin down as frequently as they need to. They're ephemeral, temporary, and disposable. None of these containers are "snowflakes". When I say snowflakes, picture you're running a server that's serving a Wordpress site. Imagine setting up this server, SSH'ing into the server, and setting everything up to be just right and tuned to the exact way you need it. This would be a snowflake server: if someone goes and deletes this server, you're screwed. You have to go and spend a bunch of time re-setting up this server. This is exactly the sort of thing we're trying to avoid with containers. These are the "pet" containers we talked about earlier. We want to make our servers easy to reproduce whenever we want so we can spin up and spin down servers at will. These are the "cattle" containers we talked about.
\nHowever, not everything can fit neatly into a container all the time. Sometimes our containers need to be stateful in some capacity. Sometimes our containers need to read and write to the host. This is fundamentally at odds with the idea of a stateless, able-to-create-and-destroy-anytime container that we've been adhering to thus far. So what are we to do?
\nEnter volumes and bind mounts. Both of these are methods of reading and writing to the host, but with slight-but-important differences in when to use which. We'll go over both.
\nLet's start here because this is easier to see the use case for. Bind mounts allow you to mount files from your host computer into your container. This lets you use containers in a much more flexible way than previously possible: you don't have to know what files the container will have when you build it, and it allows you to determine those files when you run it.
\nLet's go over an example of how this could be useful.
\nIn the previous project, we used the NGINX image to build a container with our static assets baked into the container. In general this is what I recommend you do, since now we can ship that container anywhere and it'll just work. It's totally self-contained. But what if we just want to run an NGINX container locally to test stuff out? Sure, we could make a new Dockerfile and write it, but wouldn't it be cool if we could just use the NGINX container directly? We can! Let's try it. Go back to your static site project from the previous lesson. Let's use the nginx
container to serve directly from it.
# from the root directory of your Astro app\ndocker run --mount type=bind,source="$(pwd)"/dist,target=/usr/share/nginx/html -p 8080:80 nginx:latest\n
Link to the Project (run the docker command from this directory)
\n\n\nMake sure you have built the project. If you did the previous lesson you should have this already built. If you don't see the
\ndist/
directory, run npm install && npm run build
. You should see the dist
directory after that.
This is how you do bind mounts. It's a bit verbose but necessary. Let's dissect it.
\n- We use the --mount flag to identify we're going to be mounting something in from the host.
- As far as I know the only two types are bind and volume. Here we're using bind because we want to mount in some piece of already existing data from the host.
- In the source, we identify what part of the host we want to make readable-and-writable to the container. It has to be an absolute path (e.g. we can't say "./dist") which is why we use the "$(pwd)" to get the present working directory to make it an absolute path.
- The target is where we want those files to be mounted in the container. Here we're putting it in the spot that NGINX is expecting.
- As a side note, you can mount as many mounts as you care to, and you can mix bind and volume mounts. NGINX has a default config that we're using, but if we used another bind mount to mount an NGINX config to /etc/nginx/nginx.conf it would use that instead (there's a sketch of this below).

Again, it's preferable to bake your own container so you don't have to ship the container and the code separately; you'd rather just ship one thing that you can run without much ritual nor ceremony. But this is a useful trick to have in your pocket. It's kind of like serve but with real NGINX.
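Here's the sketch promised above: layering a config bind mount on top of the assets mount. The nginx.conf file here is hypothetical (any NGINX config you happen to have in your project would do); everything else matches the command above.

```bash
# hypothetical: mount the built assets AND a custom NGINX config
docker run \
  --mount type=bind,source="$(pwd)"/dist,target=/usr/share/nginx/html \
  --mount type=bind,source="$(pwd)"/nginx.conf,target=/etc/nginx/nginx.conf,readonly \
  -p 8080:80 nginx:latest
```

The readonly option isn't required; it just makes it explicit that the container shouldn't write back to that file on the host.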
\n","markdown":"\nSo far we've been dealing with self-contained containers. Normally this is all you ever want: containers that can spin up and spin down as frequently as they need to. They're ephemeral, temporary, and disposable. None of these containers are \"snowflakes\". When I say snowflakes, picture you're running a server that's serving a Wordpress site. Imagine setting up this server, SSH'ing into the server, and setting everything up to be just right and tuned to the exact way you need it. This would be a snowflake server: if someone goes and deletes this server, you're screwed. You have to go and spend a bunch of time re-setting up this server. This is exactly the sort of thing we're trying to avoid with containers. These are the \"pet\" containers we talked about earlier. We want to make our servers easy to reproduce whenever we want so we can spin up and spin down servers at will. These are the \"cattle\" containers we talked about.\n\nHowever not everything can fit neatly into a container all the time. Sometimes our containers need to be stateful in some capacity. Sometimes our containers need to read and write to the host. This is fundamentally at odds with the idea of a stateless, able-to-create-and-destroy-anytime container that we've been adhering to thusfar. So what are we to do?\n\nEnter volumes and bind mounts. Both of these are methods of reading and writing to the host but with slight-but-important differences of when to use which. We'll go over both.\n\n## Bind Mounts\n\nLet's start here because this is easier to see the use case for. Bind mounts allow you to mount files from your host computer into your container. This allows you to use the containers a much more flexible way than previously possible: you don't have to know what files the container will have _when you build it_ and it allows you to determine those files _when you run it_.\n\nLet's go over an example of how this could be useful.\n\nIn the previous project, we used the NGINX image to build a container with our static assets baked into the container. In general this what I recommend you do since now we can ship that container anywhere and it'll just work. It's totally self-contained. But what if we just want to run a NGINX container locally to test stuff out? Sure, we could make a new Dockerfile and write it, but wouldn't it be cool if we could just use the NGINX container directly? We can! Let's try it. Go back to your static site project from the previous lesson. Let's use the `nginx` container to serve directly from it.\n\n```bash\n# from the root directory of your Astro app\ndocker run --mount type=bind,source=\"$(pwd)\"/dist,target=/usr/share/nginx/html -p 8080:80 nginx:latest\n```\n\n[āļø Link to the Project][project] (run the docker command from this directory)\n\n> Make sure you have built the project. If you did the previous lesson you should have this already built. If you don't see the `dist/` directory, run `npm install && npm run build`. You should see the `dist` directory after that.\n\nThis is how you do bind mounts. It's a bit verbose but necessary. Let's dissect it.\n\n- We use the `--mount` flag to identify we're going to be mounting something in from the host.\n- As far as I know the only two types are `bind` and `volume`. Here we're using bind because we to mount in some piece of already existing data from the host.\n- In the source, we identify what part of the host we want to make readable-and-writable to the container. 
It has to be an absolute path (e.g we can't say `\"./dist\"`) which is why use the `\"$(pwd)\"` to get the **p**resent **w**orking **d**irectory to make it an absolute path.\n- The target is where we want those files to be mounted in the container. Here we're putting it in the spot that NGINX is expecting.\n- As a side note, you can mount as many mounts as you care to, and you mix bind and volume mounts. NGINX has a default config that we're using but if we used another bind mount to mount an NGINX config to `/etc/nginx/nginx.conf` it would use that instead.\n\nAgain, it's preferable to bake your own container so you don't have to ship the container and the code separately; you'd rather just ship one thing that you can run without much ritual nor ceremony. But this is a useful trick to have in your pocket. It's kind of like [serve][serve] but with real NGINX.\n\n[storage]: https://docs.docker.com/storage/\n[project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/static-asset-project\n[serve]: https://github.com/vercel/serve\n","slug":"bind-mounts","title":"Bind Mounts","section":"Docker Features","icon":"box","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/06-docker-features/A-bind-mounts.md","nextSlug":"/lessons/docker-features/volumes","prevSlug":"/lessons/making-tiny-containers/static-asset-project"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/dev-containers.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/dev-containers.json new file mode 100644 index 0000000..a10deed --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/dev-containers.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn how to use containers to streamline setting up development environments, illustrated with a Ruby on Rails example. Explore the benefits of using Dockerfiles and bind mounts for efficient app development. Discover tools like DevContainer CLI, Visual Studio, IntelliJ, and GitHub Codespaces that support dev containers.","keywords":["containers","development environments","Dockerfiles","bind mounts","DevContainer CLI","Visual Studio","GitHub Codespaces"]},"html":"So far we've talking about taking an app and using containers to prepare the apps to run. This is an obvious use case for them and one you're going to use a lot. But let's talk about a different use case for them: building development environments for your apps.
\nLet's paint a picture. Let's say you got a new job with a company and they're a Ruby shop (if you know Ruby, pretend you don't for a sec.) When you arrive, you're going to be given a very long, likely-out-of-date, complicated README that you're going to have to go look for and then struggle through to get the proper version of Ruby set up, the correct dependencies installed, and Mercury in retrograde (just kidding.) Suffice it to say, it's a not-fun struggle to get new apps working locally, particularly if it's in a stack that you're not familiar with. Shouldn't there be a better way? There is! (I feel like I'm selling knives on an infomercial.)
\nContainers! What we can do is define a Dockerfile that sets up all our dependencies so that it's 100% re-createable with zero knowledge of how it works to everyone that approaches it. With bind mounts, we can mount our local code into the container so that we can edit locally and have it propagate into the development container. Let's give it a shot!
\nI am not a Rails developer but I will confess I have always had an admiration for talented Rails developers. On one hand, I really don't like all the black magic that Rails entails. I feel like you whisper an arcane incantation into the CLI and on the other side a new website manifests itself from the ether. On the other hand, a really good Rails dev can make stuff so much faster than me because they can wield that sorcery so well.
\nSo let's say we got added to a new Rails project and had to go set it up. Open this project in VS Code.
\n\nIf you do this in VS Code, it should show you a prompt in the bottom to reopen in a dev container. Say yes.
\n\nIf you miss the notification or want to do it later, you can either do it in the Command Palette with the command "Dev Containers: Open Workspace in Container" or with the ><
UI element in the bottom left of VS Code and clicking "Reopen in Container".
This should build the container, set up all the Ruby dependencies, and put you in a container. From here, you can open the terminal and see that you're now inside a Linux container. Run rails server
and it will start the server inside the container and automatically forward the port for you to open localhost:3000
in your own browser. There you go! Rails running with very little thought about it on our part. This is even running SQLite for us. You can make pretty complicated dev environments (using Docker Compose, which we'll talk about later); this was just a simple example.
Personally, this took a good 30 mins of messing around just to get set up, but with a dev container it was just instant, and that's kind of the magic: it's a ready-made dev environment to go.
\n\n\nJust to be super clear, your dev containers and production containers will be different. You wouldn't want to ship your dev environment to production. So in these cases your project may have multiple Dockerfiles doing different things.
\n
While dev containers were a decidedly Microsoft / GitHub initiative to start, they have opened it up into an open standard and other companies can now use dev containers. Here are a few other tools that work with dev containers.
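One of those tools is the DevContainer CLI itself. As a rough sketch (this assumes the @devcontainers/cli npm package and a project that already has a .devcontainer/ folder; check its docs for the exact flags), you could skip VS Code entirely:

```bash
# a sketch: build and start the dev container defined by the project, then run a command inside it
npm install -g @devcontainers/cli
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . rails server
```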
\nThis is not going to be a deep dive into how networking works. Networking is a deep, deep pool of knowledge and merits entire courses to understand. Just worrying about networking is some people's jobs due to the immense surface area of the subject. Instead, I want to just peek under the covers of how to do manual networking with Docker so you can understand what Docker Compose and Kubernetes do for you.
\nSo why do we care about networking? Many reasons! Let's make our Node.js app a bit more complicated. What if it had a database? Let's connect it to a running MongoDB database. We could start this MongoDB database inside of the same container and this might be fine for development on the smallest app, but it'd be better and easier if we could just use the mongo container directly. But if I have two containers running at the same time (the app container and the MongoDB container) how do they talk to each other? Networking!
\nThere are several ways of doing networking within Docker and all of them work differently depending on which operating system you're on. Again, this is a deep subject and we're just going to skim the surface. We're going to deal with the simplest, the bridge networks. There is a default bridge network running all the time. If you want to check this out, run docker network ls
. You'll see something like this:
$ docker network ls\nNETWORK ID NAME DRIVER SCOPE\nxxxxxxxxxxxx bridge bridge local\nxxxxxxxxxxxx host host local\nxxxxxxxxxxxx none null local\n
The bridge network is the one that exists all the time and we could attach to it if we want to, but again Docker recommends against it so we'll create our own. There's also the host network, which is the host computer's own network. The last network with the null
driver is one that you'd use if you wanted to use some other provider or if you wanted to do it manually yourself.
# create the network\ndocker network create --driver=bridge app-net\n\n# start the mongodb server\ndocker run -d --network=app-net -p 27017:27017 --name=db --rm mongo:7\n
I'm having you run a specific version of MongoDB, v7, because I know the package to interact with it is already available on Ubuntu. Feel free to use v8+ if you know it's available. We also added a few flags. The --name
flag allows us to refer specifically to that one running container, and even better it allows us to use that as its address on the network. We'll see that in a sec. The one other flag we added, since we're using --name,
is --rm
. If we didn't use that, we'd have to run docker rm db
before restarting our db
container, since when Docker stops a container it doesn't delete it or its logs and metadata until you tell it to. The --rm
means toss all that stuff as soon as the container finishes and free up that name again.
Now, for fun we can use another MongoDB container (because it has the mongosh client on it in addition to having the MongoDB server).
docker run -it --network=app-net --rm mongo:7 mongosh --host db\n
This will be one instance of a MongoDB container connecting to a different container over our Docker network. Cool, right? So let's make our Node.js app read and write to MongoDB!
\nThis isn't a course in MongoDB or anything but more just to show you how to connect one app container to a database container, as well as to set you up for the next lesson on Docker Compose. And this sort of method works just as well for any DB: MySQL, Postgres, Redis, etc.
\nSo first thing, let's add some logic to our app that reads and writes to MongoDB
\nconst fastify = require("fastify")({ logger: true });\nconst { MongoClient } = require("mongodb");\nconst url = process.env.MONGO_CONNECTION_STRING || "mongodb://localhost:27017";\nconst dbName = "dockerApp";\nconst collectionName = "count";\n\nasync function start() {\n const client = await MongoClient.connect(url);\n const db = client.db(dbName);\n const collection = db.collection(collectionName);\n\n fastify.get("/", async function handler(request, reply) {\n const count = await collection.countDocuments();\n return { success: true, count };\n });\n\n fastify.get("/add", async function handler(request, reply) {\n const res = await collection.insertOne({});\n return { acknowledged: res.acknowledged };\n });\n\n fastify.listen({ port: 8080, host: "0.0.0.0" }, (err) => {\n if (err) {\n fastify.log.error(err);\n process.exit(1);\n }\n });\n}\n\nstart().catch((err) => {\n console.log(err);\n process.exit(1);\n});\n
Link to the project files
\n\n\nOpen the project files from the directory linked above so you can get the right dependencies and Dockerfile. Make sure you run
\nnpm install
.
This is pretty similar to the project we ran before in the Layers section. We're just reading and writing to MongoDB now in the Node.js server, but otherwise everything else is the same, including the same Dockerfile.
\nYou could absolutely run this locally if you have MongoDB running on your host machine, since the default connection string will connect to a local MongoDB. But we also left it open so we can feed the app an environment variable to point it at a different container.
\nSo build the container and run it using the following commands:
\ndocker build --tag=my-app-with-mongo .\ndocker run -p 8080:8080 --network=app-net --init --env MONGO_CONNECTION_STRING=mongodb://db:27017 my-app-with-mongo\n
Okay so we added a new endpoint and modified one. The first one is /add
which will add an empty object (MongoDB will add an _id
to it so it's not totally empty). It will then return whether MongoDB acknowledged the insert (hopefully it did!). And then we modified the /
route to return the count of items in the database. Great! This is how the basics of networking work in Docker.
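If you want to sanity-check it from your host, something like this should work while both containers are running (it assumes you kept the -p 8080:8080 mapping from the run command above):

```bash
curl http://localhost:8080       # should return something like {"success":true,"count":0}
curl http://localhost:8080/add   # should return {"acknowledged":true}
curl http://localhost:8080       # the count should now have gone up by one
```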
One key thing here that we need to discuss: if you shut down that one Docker container, where is your data going to go? Well, it'll disappear. How do you mitigate this? Usually with some sort of volume that lives beyond the container, and usually by having more than one container of MongoDB running. It's beyond the scope of this course but you already have the tools you need to be able to do that.
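As a rough sketch of that idea (we won't actually do this in the course), you could hand the MongoDB container a named volume for its data directory, which the official mongo image keeps at /data/db, so the data outlives any one container:

```bash
# hypothetical: same db container as before, but its data now lives in a named volume
docker run -d --network=app-net --name=db --rm \
  --mount type=volume,src=mongo-data,target=/data/db \
  mongo:7
```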
\nCongrats! You've done basic networking in Docker. Now let's go use other tools to make this easier for us.
\n","markdown":"\nThis is not going to be a deep dive into how networking works. Networking is a deep, deep pool of knowledge and merits entire courses to understand. Just worrying about networking is some people's jobs due to the immense surface area of the subject. Instead, I want to just peek under the covers of how to do manual networking with Docker so you can understand what Docker Compose and Kubernetes do for you.\n\nSo why do we care about networking? Many reasons! Let's make our Node.js app a bit more complicated. What if it had a database? Let's connect it to a running MongoDB database. We _could_ start this MongoDB database inside of the same container and this might be fine for development on the smallest app but it'd be better and easier if we could just the [mongo][mongo] container directly. But if I have two containers running at the same time (the app containers and the MongoDB container) how do they talk to each other? Networking!\n\nThere are several ways of doing networking within Docker and all of them work differently depending which operating system you're on. Again, this is a deep subject and we're just going to skim the surface. We're going to deal with the simplest, the bridge networks. There is a default bridge network running all the time. If you want to check this out, run `docker network ls`. You'll see something like this:\n\n```bash\n$ docker network ls\nNETWORK ID NAME DRIVER SCOPE\nxxxxxxxxxxxx bridge bridge local\nxxxxxxxxxxxx host host local\nxxxxxxxxxxxx none null local\n```\n\nThe bridge network is the one that exists all the time and we could attach to it if we want to, but again Docker recommends against it so we'll create our own. There's also the host network which is the host computer itself's network. The last network with the `null` driver is one that you'd use if you wanted to use some other provider or if you wanted to do it manually yourself.\n\n```bash\n# create the network\ndocker network create --driver=bridge app-net\n\n# start the mongodb server\ndocker run -d --network=app-net -p 27017:27017 --name=db --rm mongo:7\n```\n\nI'm having you run a specific version of MongoDB, v7, because I know the package to interact with it is already available on Ubuntu. Feel free to use v8+ if you know it's available. We also added a few flags. The `--name` flag allows us to refer specifically to that one running container, and even better it allows us to use that as its address on the network. We'll see that in a sec. The one other, since we're using `--name` is `--rm`. If we didn't use that, we'd have to run `docker rm db` before restarting our `db` container since when it stops a container, it doesn't delete it and its logs and meta data until you tell it to. The `--rm` means toss all that stuff as soon as the container finishes and free up that name again.\n\nNow, for fun we can use _another_ MongoDB containter (because it has the `mongosh` client on it in addition to have the MongoDB server).\n\n```bash\ndocker run -it --network=app-net --rm mongo:7 mongosh --host db\n```\n\nThis will be one instance of a MongoDB container connecting to a different container over our Docker network. Cool, right? So let's make our Node.js app read and write to MongoDB!\n\n## Connecting our Node.js App to MongoDB\n\nThis isn't a course in MongoDB or anything but more just to show you how to connect one app container to a database container as well as set you up for the next lesson Docker composes. 
And this sort of method work just as well for any DB: MySQL, Postgres, Redis, etc.\n\nSo first thing, let's add some logic to our app that reads and writes to MongoDB\n\n```javascript\nconst fastify = require(\"fastify\")({ logger: true });\nconst { MongoClient } = require(\"mongodb\");\nconst url = process.env.MONGO_CONNECTION_STRING || \"mongodb://localhost:27017\";\nconst dbName = \"dockerApp\";\nconst collectionName = \"count\";\n\nasync function start() {\n const client = await MongoClient.connect(url);\n const db = client.db(dbName);\n const collection = db.collection(collectionName);\n\n fastify.get(\"/\", async function handler(request, reply) {\n const count = await collection.countDocuments();\n return { success: true, count };\n });\n\n fastify.get(\"/add\", async function handler(request, reply) {\n const res = await collection.insertOne({});\n return { acknowledged: res.acknowledged };\n });\n\n fastify.listen({ port: 8080, host: \"0.0.0.0\" }, (err) => {\n if (err) {\n fastify.log.error(err);\n process.exit(1);\n }\n });\n}\n\nstart().catch((err) => {\n console.log(err);\n process.exit(1);\n});\n```\n\n[āļø Link to the project files][project]\n\n> Open the project files to this above directory so you can get the right dependencies and Dockerfile. Make sure you run `npm install`.\n\nThis is pretty similar to the project we ran before in the Layers section. We're just reading and writing to MongoDB now in the Node.js server, but we're using otherwise everything else the same including the same Dockerfile.\n\nYou could absolutely run this locally if you have MongoDB running on your host machine since the default connection string will connect to a local MonogDB. But we also left it open so we can feed the app an environmental variable so we can modify it to be a different container.\n\nSo build the container and run it using the following commands:\n\n```bash\ndocker build --tag=my-app-with-mongo .\ndocker run -p 8080:8080 --network=app-net --init --env MONGO_CONNECTION_STRING=mongodb://db:27017 my-app-with-mongo\n```\n\nOkay so we added a new endpoint and modified one. The first one is `/add` which will add an empty object (MongoDB will add an `_id` to it so it's not totally empty). It will then return how many items it successfully added to MongoDB (hopefully 1!). And then we modified the `/` route to return the count of items in the database. Great! This is how the basics of networking work in Docker.\n\nOne key thing here that we need to discuss: if you shut down that one Docker container, where is your data going to go? Well, it'll disappear. How do you mitigate this? Usually with some sort of volume that lives beyond the container, and usually by having more than one container of MongoDB running. It's beyond the scope of this course but you already have the tools you need to be able to do that.\n\nCongrats! You've done basic networking in Docker. 
Now let's go use other tools to make this easier for us.\n\n[mongo]: https://hub.docker.com/_/mongo\n[project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/networking-with-docker\n","slug":"networking-with-docker","title":"Networking with Docker","section":"Docker Features","icon":"box","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/06-docker-features/D-networking-with-docker.md","nextSlug":"/lessons/multi-container-projects/docker-compose","prevSlug":"/lessons/docker-features/dev-containers"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/volumes.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/volumes.json new file mode 100644 index 0000000..0e9c983 --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker-features/volumes.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn about the differences between bind mounts and volumes in Docker, how to persist data using volumes for containers, and create a Node.js app with Docker volumes. Understand the benefits of using volumes over bind mounts in Docker for data persistence and manageability.","keywords":["Docker bind mounts vs volumes","persist data in Docker containers","create Node.js app with Docker volumes"]},"html":"Bind mounts are great for when you need to share data between your host and your container as we just learned. Volumes, on the other hand, are so that your containers can maintain state between runs. So if you have a container that runs and the next time it runs it needs the results from the previous time it ran, volumes are going to be helpful. Volumes can not only be shared by the same container-type between runs but also between different containers. Maybe if you have two containers and you want to log to consolidate your logs to one place, volumes could help with that.
\nThe key here is this: bind mounts are file systems managed by the host. They're just normal files on your host being mounted into a container. Volumes are different because they're a new file system that Docker manages and mounts into your container. These Docker-managed file systems are not visible to the host system (they can be found, but they're designed not to be.)
\nLet's make a quick Node.js app that reads from a file that has a number in it, prints it, writes it to a volume, and finishes. Create a new Node.js project.
\nmkdir docker-volume\ncd docker-volume\ntouch index.js Dockerfile\n
Inside that index.js file, put this:
\nconst fs = require("fs").promises;\nconst path = require("path");\n\nconst dataPath = path.join(process.env.DATA_PATH || "./data.txt");\n\nfs.readFile(dataPath)\n .then((buffer) => {\n const data = buffer.toString();\n console.log(data);\n writeTo(+data + 1);\n })\n .catch((e) => {\n console.log("file not found, writing '0' to a new file");\n writeTo(0);\n });\n\nconst writeTo = (data) => {\n fs.writeFile(dataPath, data.toString()).catch(console.error);\n};\n
Don't worry too much about the index.js. It looks for a file $DATA_PATH
if it exists or ./data.txt
if it doesn't and if it exists, it reads it, logs it, and writes back to the data file after incrementing the number. If you just run it right now, it'll create a data.txt
file with 0 in it. If you run it again, it'll have 1
in there and so on. So let's make this work with volumes.
FROM node:20-alpine\nCOPY --chown=node:node . /src\nWORKDIR /src\nCMD ["node", "index.js"]\n
Now run
\ndocker build -t incrementor .\ndocker run --rm incrementor\n
Every time you run this it'll be the same thing. That's because nothing is persisted once the container finishes. We need something that can live between runs. We could use bind mounts and it would work, but this data is only designed to be used and written to within Docker, which makes volumes preferable and recommended by Docker. If you use volumes, Docker can handle backups, cleanups, and more security for you. If you use bind mounts, you're on your own.
\nSo, without having to rebuild your container, try this
\ndocker run --rm --env DATA_PATH=/data/num.txt --mount type=volume,src=incrementor-data,target=/data incrementor\n
Now you should be able to run it multiple times and everything should work! We use the --env
flag to set the DATA_PATH to be where we want index.js
to write the file and we use --mount
to mount a named volume called incrementor-data
. You can leave this out and it'll be an anonymous volume that will persist beyond the container but it won't automatically choose the right one on future runs. Awesome!
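If you're curious where that named volume went, Docker can tell you with its standard volume subcommands:

```bash
docker volume ls                          # incrementor-data should be in the list
docker volume inspect incrementor-data    # shows the mountpoint Docker manages for it
docker volume rm incrementor-data         # removes it (only once no container is using it)
```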
Prefer to use volumes when you can, use bind mounts where it makes sense. If you're still unclear, the official Docker storage docs are pretty good on the subject.
\nThere are two more that we didn't talk about, tmpfs
and npipe
. The former is Linux only and the latter is Windows only (we're not going over Windows containers at all in this workshop.) tmpfs
imitates a file system but actually keeps everything in memory. This is useful for mounting in secrets like database keys or anything that wouldn't be persisted between container launches but you don't want to add to the Dockerfile. The latter is useful for mounting third party tools for Windows containers. If you need more info than that, refer to the docs. I've never directly used either.
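Just to show the shape of it, here's a minimal tmpfs sketch (Linux containers only; the path is arbitrary). Anything written there lives only in memory and vanishes when the container exits:

```bash
docker run --rm --mount type=tmpfs,destination=/scratch alpine:3.19.1 \
  sh -c "echo secret > /scratch/key && cat /scratch/key && df -h /scratch"
```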
Let's take a look at some more cool features of the Docker CLI.
\npull
allows you to pre-fetch a container to run.
# this just downloads and caches the image, it doesn't do anything else with it\ndocker pull jturpin/hollywood\n\n# notice it's already loaded and cached here; it doesn't redownload it\ndocker run -it jturpin/hollywood hollywood\n
That will pull the hollywood container from the user jturpin's user account. The second line will execute this fun container, which is just meant to look like a hacker's screen in a movie (it doesn't really do anything other than look cool.)
\n\n\nNote: The
\njturpin/hollywood
image has been deprecated. These steps should still work, but if you have issues, you can replace that image with bcbcarl/hollywood
.
push
allows you to push containers to whatever registry you're connected to (probably normally Docker Hub or something like Azure Container Registry or GitHub Container Registry).
docker inspect node:20\n
This will dump out a lot of info about the container. Helpful when figuring out what's going on with a container
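The output is a big JSON blob, so the --format flag with a Go template is handy for pulling out just one field. For example (my-mongo here is a hypothetical running container name):

```bash
# what command does this image run by default?
docker inspect --format '{{.Config.Cmd}}' node:20

# what IP address did Docker assign to a running container?
docker inspect --format '{{.NetworkSettings.IPAddress}}' my-mongo
```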
\nAs they sound, these pause or unpause all the processes in a container. Feel free to try
\ndocker run -dit --name hw --rm jturpin/hollywood hollywood\ndocker ps # see container running\ndocker pause hw\ndocker ps # see container paused\ndocker unpause hw\ndocker ps # see container running again\ndocker kill hw # see container is gone\n
This allows you to execute a command against a running container. This is different from docker run
because docker run
will start a new container whereas docker exec
runs the command in an already-running container.
docker run -dit --name hw --rm jturpin/hollywood hollywood\n\n# see it output all the running processes of the container\ndocker exec hw ps aux\n
If you haven't seen ps aux
before, it's a really useful way to see what's running on your computer. Try running ps aux
on your macOS or Linux computer to see everything running.
Allows you to dump out your container to a tar ball (which we did above.) You can also import a tar ball as well.
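A quick sketch of the round trip, assuming the hw container from above is still running:

```bash
docker export -o hw.tar hw         # dump the container's file system to a tarball
docker import hw.tar hw-snapshot   # bring it back in as a new image named hw-snapshot
docker images hw-snapshot          # note: import keeps only the file system, not metadata like the default CMD
```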
\nWe'll get into layers in a bit but this allow you to see how this Docker image's layer composition has changed over time and how recently.
\ndocker history node:20\n
Dumps a bunch of info about the host system. Useful if you're on a VM somewhere and not sure what the environment is.
\ndocker info\n
Allows you to see processes running on a container (similar to what we did above)
\ndocker run -dit --name my-mongo --rm mongo\ndocker top my-mongo # you should see MongoDB running\ndocker kill my-mongo\n
If you run docker ps --all
it'll show all the containers you've stopped running in addition to the ones currently running. If you want to remove something from this list, you can do docker rm <id or name>
.
You can run docker container prune
to remove all of the stopped containers.
If you want to remove an image from your computer (to save space or whatever) you can run docker rmi mongo
and it'll delete the image from your computer. This isn't a big deal since you can always reload it again
Very useful to see the output of one of your running containers.
\ndocker run --name my-mongo --rm -dit mongo\ndocker logs my-mongo # see all the logs\ndocker kill my-mongo\n
Pretty self explanatory. Will restart a running container
\nIf you want to see if a container exists on Docker Hub (or whatever registry you're connected to), this will allow you to take a look.
\ndocker search python # see all the various flavors of Python containers you can run\ndocker search node # see all the various flavors of Node.js containers you can run\n
","markdown":"\nLet's take a look at some more cool features of the Docker CLI.\n\n### pull / push\n\n`pull` allows you to pre-fetch container to run.\n\n```bash\n# this just downloads and caches the image, it doesn't do anything else with it\ndocker pull jturpin/hollywood\n\n# notice it's already loaded and cached here; it doesn't redownload it\ndocker run -it jturpin/hollywood hollywood\n```\n\nThat will pull the hollywood container from the user jturpin's user account. The second line will execute this fun container which is just meant to look a hacker's screen in a movie (it doesn't really do anything than look cool.)\n\n> Note: The `jturpin/hollywood` image has been depricated. These steps should still work, but if you have issues, you can replace that image with `bcbcarl/hollywood`.\n\n`push` allows you to push containers to whatever registry you're connected to (probably normally Docker Hub or something like Azure Container Registry or GitHub Container Registry).\n\n### inspect\n\n```bash\ndocker inspect node:20\n```\n\nThis will dump out a lot of info about the container. Helpful when figuring out what's going on with a container\n\n### pause / unpause\n\nAs it looks, these pauses or unpause all the processes in a container. Feel free to try\n\n```bash\ndocker run -dit --name hw --rm jturpin/hollywood hollywood\ndocker ps # see container running\ndocker pause hw\ndocker ps # see container paused\ndocker unpause hw\ndocker ps # see container running again\ndocker kill hw # see container is gone\n```\n\n### exec\n\nThis allows you to execute a command against a running container. This is different from `docker run` because `docker run` will start a new container whereas `docker exec` runs the command in an already-running container.\n\n```bash\ndocker run -dit --name hw --rm jturpin/hollywood hollywood\n\n# see it output all the running processes of the container\ndocker exec hw ps aux\n```\n\nIf you haven't seen `ps aux` before, it's a really useful way to see what's running on your computer. Try running `ps aux` on your macOS or Linux computer to see everything running.\n\n### import / export\n\nAllows you to dump out your container to a tar ball (which we did above.) You can also import a tar ball as well.\n\n### history\n\nWe'll get into layers in a bit but this allow you to see how this Docker image's layer composition has changed over time and how recently.\n\n```bash\ndocker history node:20\n```\n\n### info\n\nDumps a bunch of info about the host system. Useful if you're on a VM somewhere and not sure what the environment is.\n\n```bash\ndocker info\n```\n\n### top\n\nAllows you to see processes running on a container (similar to what we did above)\n\n```bash\ndocker run -dit --name my-mongo --rm mongo\ndocker top my-mongo # you should see MongoDB running\ndocker kill my-mongo\n```\n\n### rm / rmi\n\nIf you run `docker ps --all` it'll show all containers you've stopped running in addition to the runs you're running. If you want to remove something from this list, you can do `docker rm So it's much easier to do what we did with Docker. Run this command:
\ndocker run --interactive --tty alpine:3.19.1\n# or, to be shorter: docker run -it alpine:3.19.1\n
A bit easier to remember, right? This will drop you into an Alpine ash shell inside of a container as the root user of that container. When you're done, just run exit
or hit CTRL+D. Notice that this will grab the alpine image from Docker for you and run it. The run
part of the command is telling Docker you're going to be executing a container (as opposed to building it.) The -it
part says you want to be dropped into the container interactively so you can run commands and inspect the container. By default containers run and then exit as soon as they're done. Go ahead and try docker run alpine:3.19.1
. It'll look like it did nothing, but it actually starts the container and then, because it has nothing defined for it to do, it just exits.
So what if you wanted it to execute something? Try this:
\ndocker run alpine:3.19.1 ls\n
Or let's switch to Ubuntu now, since it's more familiar to most. We'll talk about Alpine later on in-depth.
\ndocker run ubuntu:jammy ls\n
The ls
part at the end is what you pass into the container to be run. As you can see here, it executes the command, outputs the results, and shuts down the container. This is also how you'd run something long-lived like a Node.js server: since a server doesn't exit on its own, the container will keep running until the server crashes or exits itself.
So now what if we want to detach the container running from the foreground? Let's try that.
\ndocker run --detach -it ubuntu:jammy # or, to be shorter: docker run -dit ubuntu:jammy\n
So it prints a long hash out and then nothing. Oh no! What happened to it!? Well, it's running in the background. So how do we get ahold of it?
\ndocker ps\n
This will print out all the running containers that Docker is managing for you. You should see your container there. So copy the ID or the name and say:
\ndocker attach <ID or name> # e.g. `docker attach 20919c49d6e5` would attach to that container\n
This allows you to attach a shell to a running container and mess around with it. Useful if you need to inspect something or see running logs. Feel free to type exit
to get out of here. Run docker run -dit ubuntu:jammy
one more time. Let's kill this container without attaching to it. Run docker ps
, get the IDs or names of the containers you want to kill and say:
docker kill <IDs or names of containers> # e.g. `docker kill fae0f0974d3d 803e1721dad3 20919c49d6e5` would kill those three containers\n
Let's make it a bit easier to keep track of these. Try this
\ndocker run -dit --name my-ubuntu ubuntu:jammy\ndocker kill my-ubuntu\n
Now you can refer to these by a name you set. But now if you tried it again, it'd say that my-ubuntu
exists. If you run docker ps --all
you'll see that the container exists even if it's been stopped. That's because Docker keeps this metadata around until you tell it to stop doing that. You can run docker rm my-ubuntu
which will free up that name or you can run docker container prune
to free up all existing stopped containers (and free up some disk space.)
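Putting those together, the cleanup flow looks something like this:

```bash
docker ps --all         # see stopped containers hanging around
docker rm my-ubuntu     # remove one by name, which frees the name up
docker container prune  # or remove every stopped container at once
```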
In the future you can just do
\ndocker run --rm -dit --name my-ubuntu ubuntu:jammy\ndocker kill my-ubuntu\n
This will automatically clean up the container when it's done.
\n","markdown":"\n### Docker Images with Docker\n\nSo it's much easier to do what we did with Docker. Run this command:\n\n```bash\ndocker run --interactive --tty alpine:3.19.1\n# or, to be shorter: docker run -it alpine:3.19.1\n```\n\nA bit easier to remember, right? This will drop you into a Alpine ash shell inside of a container as the root user of that container. When you're done, just run `exit` or hit CTRL+D. Notice that this will grab the [alpine][alpine] image from Docker for you and run it. The `run` part of the command is telling Docker you're going to be executing a container (as opposed to building it.) The `-it` part says you want to be dropped into the container interactively so you can run commands and inspect the container. By default containers run and then exit as soon as they're done. Go ahead and try `docker run alpine:3.19.1`. It'll look it did nothing but it actually starts the container and then, because it has nothing defined for it to do, it just exits.\n\nSo what if you wanted it to execute something? Try this:\n\n```bash\ndocker run alpine:3.19.1 ls\n```\n\nOr let's switch to Ubuntu now, since it's more familiar to most. We'll talk about Alpine later on in-depth.\n\n```bash\ndocker run ubuntu:jammy ls\n```\n\nThe `ls` part at the end is what you pass into the container to be run. As you can see here, it executes the command, outputs the results, and shuts down the container. This is great for running a Node.js server. Since it doesn't exit, it'll keep running until the server crashes or the server exits itself.\n\nSo now what if we want to detach the container running from the foreground? Let's try that.\n\n```bash\ndocker run --detach -it ubuntu:jammy # or, to be shorter: docker run -dit ubuntu:jammy\n```\n\nSo it prints a long hash out and then nothing. Oh no! What happened to it!? Well, it's running in the background. So how do we get ahold of it?\n\n```bash\ndocker ps\n```\n\nThis will print out all the running containers that Docker is managing for you. You should see your container there. So copy the ID or the name and say:\n\n```bash\ndocker attachThese pre-made containers are called images. They basically dump out the state of the container, package that up, and store it so you can use it later. So let's go nab one of these image and run it! We're going to do it first without Docker to show you that you actually already knows what's going on.
\nFirst thing, let's go grab a container off of Docker Hub. Let's grab the latest Node.js container that runs Ubuntu.
\n# start docker contaier with docker running in it connected to host docker daemon\ndocker run -ti -v /var/run/docker.sock:/var/run/docker.sock --privileged --rm --name docker-host docker:26.0.1-cli\n\n# run stock alpine container\ndocker run --rm -dit --name my-alpine alpine:3.19.1 sh\n\n# export running container's file system\ndocker export -o dockercontainer.tar my-alpine\n\n# make container-root directory, export contents of container into it\nmkdir container-root\ntar xf dockercontainer.tar -C container-root/\n\n# make a contained user, mount in name spaces\nunshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot $PWD/container-root ash # this also does chroot for us\nmount -t proc none /proc\nmount -t sysfs none /sys\nmount -t tmpfs none /tmp\n\n# here's where you'd do all the cgroup rules making with the settings you wanted to\n# we're not going to since we did it all in the last lesson\n
So, this isn't totally it. Docker does a lot more for you than just this, like networking, volumes, and other things, but suffice it to say this is the core of what Docker is doing for you: creating a new environment that's isolated by namespace and limited by cgroups and chroot'ing you into it. So why did we go through all this ceremony? Well, it's because I want you to understand what Docker is doing for you, and to know that you could do it by hand, but since there's a tool that does it for you, you don't want to. I hold a strong personal belief that people need to understand their tools and what they do for them. Every tool you add to your environment adds complexity but should also add ease. If you don't understand the complexity the tool is solving, you resent it and don't get to fully appreciate nor take advantage of what the tool can fully offer.
\nSo how often will you do what we just did? Never. 99.9% of container-utilizers have no idea this is what's happening under the hood. But now that you know it will make you embrace the complexity that Docker adds because you can see why you have it.
\n","markdown":"\nThese pre-made containers are called _images_. They basically dump out the state of the container, package that up, and store it so you can use it later. So let's go nab one of these image and run it! We're going to do it first without Docker to show you that you actually already knows what's going on.\n\nFirst thing, let's go grab a container off of Docker Hub. Let's grab the latest Node.js container that runs Ubuntu.\n\n### Docker Images without Docker\n\n```bash\n# start docker contaier with docker running in it connected to host docker daemon\ndocker run -ti -v /var/run/docker.sock:/var/run/docker.sock --privileged --rm --name docker-host docker:26.0.1-cli\n\n# run stock alpine container\ndocker run --rm -dit --name my-alpine alpine:3.19.1 sh\n\n# export running container's file system\ndocker export -o dockercontainer.tar my-alpine\n\n# make container-root directory, export contents of container into it\nmkdir container-root\ntar xf dockercontainer.tar -C container-root/\n\n# make a contained user, mount in name spaces\nunshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot $PWD/container-root ash # this also does chroot for us\nmount -t proc none /proc\nmount -t sysfs none /sys\nmount -t tmpfs none /tmp\n\n# here's where you'd do all the cgroup rules making with the settings you wanted to\n# we're not going to since we did it all in the last lesson\n```\n\nSo, this isn't totally it. Docker does a lot more for you than just this like networking, volumes, and other things but suffice to say this core of what Docker is doing for you: creating a new environment that's isolated by namespace and limited by cgroups and chroot'ing you into it. So why did we go through all this ceremony? Well, it's because I want you to understand what Docker is doing for you, know that you _could_ do it by hand but since there's a tool that does for you you don't want to. I hold a strong personal belief that tools people need to understand their tools and what they do for them. Every tool you add to your environment adds complexity but should also add ease. If you don't understand the complexity the tool is solving, you resent it and don't get to fully appreciate nor take advantage of what the tool can fully offer.\n\nSo how often will you do what we just did? Never. 99.9% of container-utilizers have no idea this is what's happening under the hood. But now that you know it will make you embrace the complexity that Docker adds because you can see why you have it.\n","slug":"docker-images","title":"Docker Images","section":"Docker","icon":"fish","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/03-docker/A-docker-images.md","nextSlug":"/lessons/docker/docker-images-with-docker","prevSlug":"/lessons/crafting-containers-by-hand/cgroups"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/javascript-on-docker.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/javascript-on-docker.json new file mode 100644 index 0000000..50716d7 --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/javascript-on-docker.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn how to run Node.js, Deno, Bun, and other runtimes in containers using Docker images. 
Explore different Linux distros like Debian, Alpine, and CoreOS for your containerized applications.","keywords":["Node.js","Docker","containers","Linux distros","Deno","Bun","runtimes"]},"html":"So now what if we wanted to run a container that has Node.js in it? The default Ubuntu container doesn't have Node.js installed. Let's use a different container!
\ndocker run -it node:20\n
The version here is we're using is Node.js version 20. If you run this as-is, it'll drop you directly into Node.js. What version of Linux do you think this is? Let's find out!
\ndocker run -it node:20 cat /etc/issue\n
It's Debian! They made a choice to choose Debian which is a perfectly great distro to use (it's what Ubuntu is based on.)
\nWhat if we wanted to be dropped into bash of that container? Easy! You already know how!
\ndocker run -it node:20 bash\n
Remember, after we identify the container (node), anything we put after get's evaluated instead of the default command identified by the container (in the container node
's case, it runs the command node
by default). This allows us to run whatever command we want! In this case, we're exectuing bash
which puts us directly into a bash shell.
We'll get into later how to select which Linux distros you should use but for now this is just a fun exercise.
\nJust for fun, let's try one of the other Linux distros that you can use with Node.js
\ndocker run -it node:20-alpine cat /etc/issue\n
This one still has Node.js version 20 on it but it's using a much slimmer version of Linux on it, Alpine. We'll talk a lot about Alpine later but know that it's possible.
\ndocker run -it denoland/deno:centos-1.42.4\ndocker run -it denoland/deno:centos-1.42.4 deno\n
This will allow you to run the alternative to Node.js JavaScript runtime, Deno. This command should log out "Welcome to Deno!" and then exit.
\nThis operating system is another good candiate for your Linux distro for you containers, CoreOS which is a Fedora/IBM product.
\nThe second command will actually get you into the Deno REPL to play around with Deno.
\ndocker run -it oven/bun:1.1.3 bun repl\ndocker run -it oven/bun:1.1.3 cat /etc/issue\n
Like above, the first command will get you into Bun, another JS runtime based on Safari's JavaScript engine JavaScriptCore (as opposed to Chrome's V8.)
\nThe second command will let you see that by default Bun uses Debian.
\n# you don't have to run all of these, just wanted to show you the variety of what's available\ndocker run -it ruby:3.3\ndocker run -it golang:1.22.2\ndocker run -it rust:1.77.2\ndocker run -it php:8.2\ndocker run -it python:3.12.3\n
Here are just a few, but as you can imagine, just about every runtime has a pre-made container for it. And in the case yours doesn't, I'll show you how to make it!
\n","markdown":"\n## Node.js on Containers\n\nSo now what if we wanted to run a container that has Node.js in it? The default Ubuntu container doesn't have Node.js installed. Let's use a different container!\n\n```bash\ndocker run -it node:20\n```\n\nThe version here is we're using is Node.js version 20. If you run this as-is, it'll drop you directly into Node.js. What version of Linux do you think this is? Let's find out!\n\n```bash\ndocker run -it node:20 cat /etc/issue\n```\n\nIt's Debian! They made a choice to choose Debian which is a perfectly great distro to use (it's what Ubuntu is based on.)\n\nWhat if we wanted to be dropped into bash of that container? Easy! You already know how!\n\n```bash\ndocker run -it node:20 bash\n```\n\nRemember, after we identify the container ([node][node]), anything we put after get's evaluated instead of the default command identified by the container (in the container `node`'s case, it runs the command `node` by default). This allows us to run whatever command we want! In this case, we're exectuing `bash` which puts us directly into a bash shell.\n\nWe'll get into later how to select which Linux distros you should use but for now this is just a fun exercise.\n\nJust for fun, let's try one of the other Linux distros that you can use with Node.js\n\n```bash\ndocker run -it node:20-alpine cat /etc/issue\n```\n\nThis one still has Node.js version 20 on it but it's using a much slimmer version of Linux on it, Alpine. We'll talk a lot about Alpine later but know that it's possible.\n\n## Deno\n\n```bash\ndocker run -it denoland/deno:centos-1.42.4\ndocker run -it denoland/deno:centos-1.42.4 deno\n```\n\nThis will allow you to run the alternative to Node.js JavaScript runtime, Deno. This command should log out \"Welcome to Deno!\" and then exit.\n\nThis operating system is another good candiate for your Linux distro for you containers, CoreOS which is a Fedora/IBM product.\n\nThe second command will actually get you into the Deno REPL to play around with Deno.\n\n## Bun\n\n```bash\ndocker run -it oven/bun:1.1.3 bun repl\ndocker run -it oven/bun:1.1.3 cat /etc/issue\n```\n\nLike above, the first command will get you into Bun, another JS runtime based on Safari's JavaScript engine JavaScriptCore (as opposed to Chrome's V8.)\n\nThe second command will let you see that by default Bun uses Debian.\n\n## A few other runtimes\n\n```bash\n# you don't have to run all of these, just wanted to show you the variety of what's available\ndocker run -it ruby:3.3\ndocker run -it golang:1.22.2\ndocker run -it rust:1.77.2\ndocker run -it php:8.2\ndocker run -it python:3.12.3\n```\n\nHere's just a few but as you can imagine, just about every run time has a pre-made container for them. 
And in the case yours doesn't, I'll show you how to make it!\n\n[node]: https://hub.docker.com/_/node\n","slug":"javascript-on-docker","title":"Javascript on Docker","section":"Docker","icon":"fish","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/03-docker/C-javascript-on-docker.md","nextSlug":"/lessons/docker/tags","prevSlug":"/lessons/docker/docker-images-with-docker"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/tags.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/tags.json new file mode 100644 index 0000000..8823dac --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/docker/tags.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn how to manage Docker container versions, from using the latest tag to specifying specific versions for Node.js and exploring Alpine Linux for minimalistic container deployments. Discover the benefits of choosing lightweight Alpine containers over larger Ubuntu or Debian images for faster deployment, reduced storage costs, and enhanced security.","keywords":["Docker containers","version management","Alpine Linux","Node.js","container deployment","security","minimalist containers"]},"html":"So far we've just been running containers with random tags that I chose. If you run docker run -it node
the tag implicitly is using the latest
tag. When you say docker run -it node
, it's the same as saying docker run -it node:latest
. The :latest
is the tag. This allows you to run different versions of the same container, just like you can install React version 17 or React version 18: sometimes you don't want the latest. Let's say you have a legacy application at your job and it depends on running on Node.js 20 (update your app, Node.js is already past end-of-life), then you can say
docker run -it node:20 bash\n
Once in the shell, run node --version
and you'll see the Node.js version is 20.x.x! Neat! This is helpful because now we can fix our Node.js version to the one our app expects. Hop back over to the Docker Hub page for the node container. Take a look at all the versions of the node container you can download. Let's try another one.
docker run node:20-alpine cat /etc/issue\n
You'll see this is running an entirely different OS altogether: Alpine! Alpine Linux is a very, very tiny distro of Linux made for containers, specifically because it is tiny. Alpine containers are bare bones: if you want anything in them, you're going to have to do it yourself. This is in opposition to the Ubuntu and Debian containers: they ship the kitchen sink with them, which is both convenient and much bigger in size. Alpine images are about five megabytes whereas Ubuntu is close to two hundred megabytes. As you can imagine, this can make a difference in how fast you can deploy and can cost significantly less in terms of storage and network traffic. It's also in general better to have fewer unnecessary things in your containers: less is more in terms of security. If an attacker tries to execute a Python exploit on your container but your container doesn't have Python then their attack won't work.
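If you want to see the size difference yourself, one quick way (the exact numbers will vary by version; this assumes you're happy to pull both tags locally) is to compare them side by side:

```bash
# pull both variants, then list the local node images with their sizes
docker pull node:20
docker pull node:20-alpine
docker images node
```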
\nWe'll get more into how to ship containers to production but I'll leave you with this pro-tip: have a development container which has all the bells, whistles, debugging tools, etc. that you need. Then have a production container that's as minimal as it can possibly be. You'll get the best of both worlds.
\n","markdown":"\nSo far we've just been running containers with random tags that I chose. If you run `docker run -it node` the tag implicitly is using the `latest` tag. When you say `docker run -it node`, it's the same as saying `docker run -it node:latest`. The `:latest` is the tag. This allows you to run different versions of the same container, just like you can install React version 17 or React version 18: some times you don't want the latest. Let's say you have a legacy application at your job and it depends on running on Node.js 20 (update your app, Node.js is already past end-of-life) then you can say\n\n```bash\ndocker run -it node:20 bash\n```\n\nOnce in the shell, run `node --version` and you'll see the Node.js version is 20._._! Neat! This is helpful because now we can fix our Node.js version to the one our app expects. Hop back over to [the Docker Hub page for the node container][node]. Take a look at all the version of the node container you can download. Let's try another one.\n\n```bash\ndocker run node:20-alpine cat /etc/issue\n```\n\nYou'll see this is running an entirely different OS all together: Alpine! [Alpine Linux][alpine] is a very, very tiny distro of Linux made for containers and specifically because it is tiny. Alpine containers are bare bones: if you want _anything_ in them, you're going to have to do it yourself. This is in opposition to the Ubuntu and Debian containers: they ship the kitchen sink with them which is both convenient and much bigger in size. Alpine images are about five megabytes whereas Ubuntu is close to two hundred megabytes. As you can imagine, this can make a difference in how fast you can deploy and can cost significantly less in terms of storage and network traffic. It's also in general better to have less unnecessary things in your containers: less is more in terms of security. If an attacker tries to execute a Python exploit on your container but your container doesn't have Python then their attack won't work.\n\nWe'll get more into how to ship containers to production but I'll leave you with this pro-tip: have a development container which has all the bells, whistles, debugging tools, etc. that you need. Then have a production container that's minimalist as possibly can be. You'll get the best of both worlds.\n\n[node]: https://hub.docker.com/_/node/\n[alpine]: https://hub.docker.com/_/alpine\n","slug":"tags","title":"Tags","section":"Docker","icon":"fish","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/03-docker/D-tags.md","nextSlug":"/lessons/docker/docker-cli","prevSlug":"/lessons/docker/javascript-on-docker"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/a-note-on-expose.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/a-note-on-expose.json new file mode 100644 index 0000000..71e160e --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/a-note-on-expose.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Understanding the usage and limitations of the `EXPOSE` instruction in Docker, its intended purpose to expose container ports to the host machine, and the optional `-P` flag for mapping ports to random host ports. 
Considerations include documentation benefits and deliberate port mapping.","keywords":["Docker EXPOSE instruction","Docker port mapping","Dockerfile port documentation"]},"html":"This was a point of confusion for me so I'm going to try to clear it up for you. There is an instruction called EXPOSE <port number>
whose intended use is to expose ports from within the container to the host machine. However, if we don't do the -p 3000:3000
it still isn't exposed, so in reality this instruction doesn't do much. You don't need EXPOSE
.
There are two caveats to that. The first is that it could be useful documentation to say that “I know this Node.js service listens on port 3000 and now anyone who reads this Dockerfile will know that too.” I would counter that I don't think the Dockerfile is the best place for that documentation.
\nThe second caveat is that instead of -p 3000:3000
you can do -P
. This will take all of the ports you exposed using EXPOSE
and will map them to random ports on the host. You can see what ports it chose by using docker ps
. It'll say something like 0.0.0.0:32769->3000/tcp
so you can see in this case it chose 32769
. Again, I'd prefer to be deliberate about which ports are being mapped.
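To make that concrete, here is a hedged sketch — the image name and the tiny inline server are made up for illustration, not from the course files — of EXPOSE plus -P in action:

```dockerfile
# EXPOSE only documents the port; it does not publish it by itself
FROM node:20
EXPOSE 3000
CMD ["node", "-e", "require('http').createServer((req, res) => res.end('hi')).listen(3000)"]
```

```bash
docker build -t expose-demo .
# -P maps every EXPOSEd port to a random high port on the host
docker run -d --rm --name expose-demo -P expose-demo
docker ps    # look for something like 0.0.0.0:32769->3000/tcp
docker kill expose-demo
```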
Okay, all looking good so far. Let's take this app one step further: let's give it an npm install step! In the directory where your app is, put this:
\n// this is the sample app from fastify.dev\n\n// Require the framework and instantiate it\nconst fastify = require("fastify")({ logger: true });\n\n// Declare a route\nfastify.get("/", function handler(request, reply) {\n reply.send({ hello: "world" });\n});\n\n// Run the server!\nfastify.listen({ port: 8080, host: "0.0.0.0" }, (err) => {\n if (err) {\n fastify.log.error(err);\n process.exit(1);\n }\n});\n
\nThis is a Fastify server. Fastify is a server-side framework (like Express) for Node.js and one I've used several times. This is going to require that we npm install
the dependencies. So in your project, do the following:
npm init -y # this will create a package.json for you without asking any questions\nnpm install fastify\n
āļø Link to the package.json
\nNow try running node index.js
to run the Node.js server. You should see it running and logging out info whenever you hit an endpoint. Cool, so now that we have a full-featured Node.js app, let's containerize it.
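As a quick sanity check (assuming the Fastify app above is saved as index.js and you've run the install commands), start it and hit it from a second terminal:

```bash
node index.js
# in another terminal
curl localhost:8080
# => {"hello":"world"}
```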
If we tried to build it and run it right now it'd fail because we didn't npm install
the dependencies. So now right after the COPY
we'll add a RUN
.
FROM node:20\n\nUSER node\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node . .\n\nRUN npm ci\n\nCMD ["node", "index.js"]\n
docker build -t more-complicated-app .\ndocker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app\n
We changed the COPY
to copy everything in the directory. Right now you probably have a node_modules
but if you're building a container directly from a repo it won't copy the node_modules
so we have to operate under the assumption that those won't be there. Feel free even to delete them if you want.
Let's go ahead and add a .dockerignore
file to the root of the project that prevents Docker from copying the node_modules
. This has the same format as a .gitignore
.
node_modules/\n.git/\n
We then added a RUN
instruction to run a command inside of the container. If you're not familiar with npm ci
it's very similar to npm install
with a few key differences: it'll follow the package-lock.json
exactly (where npm install
will ignore it and update it if newer patch versions of your dependencies are available) and it'll automatically delete node_modules
if it exists. npm ci
is made for situations like this.
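As a rough sketch of the difference (this assumes nothing beyond a project with a package.json and a committed package-lock.json):

```bash
# npm install can modify package-lock.json; use it when you're intentionally changing dependencies
npm install
# npm ci requires a lockfile, deletes any existing node_modules, and installs exactly what the lockfile specifies
npm ci
```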
Now if you try to build again, it may fail with permissions issues. Why? Well, when you have WORKDIR
create a directory, it does so as root (depending on which version of Docker you're using), which means that the node user won't have enough permissions to modify that directory. We could either use RUN
to change the ownership of that directory or we could use RUN
to make the directory in the first place as the node user. Let's do the latter.
Generally it's encouraged to not rely on WORKDIR
to get it right and just do it yourself.
FROM node:20\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node . .\n\nRUN npm ci\n\nCMD ["node", "index.js"]\n
\ndocker build -t more-complicated-app .\ndocker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app\n
Now try building and running your container. It should work now! Yay!
\n\n\n","markdown":"\nOkay, all looking good so far. Let's make this app go one step further. Let's have it have an npm install step! In the directory where your app is, put this:\n\n```javascript\n// this is the sample app from fastify.dev\n\n// Require the framework and instantiate it\nconst fastify = require(\"fastify\")({ logger: true });\n\n// Declare a route\nfastify.get(\"/\", function handler(request, reply) {\n reply.send({ hello: \"world\" });\n});\n\n// Run the server!\nfastify.listen({ port: 8080, host: \"0.0.0.0\" }, (err) => {\n if (err) {\n fastify.log.error(err);\n process.exit(1);\n }\n});\n```\n\n[āļø Link to the code][node-file]\n\nThis is a [Fastify][fastify] server. Fastify is a server-side framework (like Express) for Node.js and one I've used several times. This is going to require that we `npm install` the dependencies. So in your project do the following\n\n```bash\nnpm init -y # this will create a package.json for you without asking any questions\nnpm install fastify\n```\n\n[āļø Link to the package.json][package-file]\n\nNow try running `node index.js` to run the Node.js server. You should see it running and logging out info whenever you hit an endpoint. Cool, so now that we have a full featured Node.js app, let's containerize it.\n\nIf we tried to build it and run it right now it'd fail because we didn't `npm install` the dependencies. So now right after the `COPY` we'll add a `RUN`.\n\n```dockerfile\nFROM node:20\n\nUSER node\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node . .\n\nRUN npm ci\n\nCMD [\"node\", \"index.js\"]\n```\n\n```bash\ndocker build -t more-complicated-app .\ndocker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app\n```\n\nWe changed the `COPY` to copy everything in the directory. Right now you probably have a `node_modules` but if you're building a container directly from a repo it won't copy the `node_modules` so we have to operate under the assumption that those won't be there. Feel free even to delete them if you want.\n\nLet's go ahead and add a `.dockerignore` file to the root of the project that prevents Docker from copying the `node_modules`. This has the same format as a `.gitignore`.\n\n```\nnode_modules/\n.git/\n```\n\nWe then added a `RUN` instruction to run a command inside of the container. If you're not familiar with `npm ci` it's very similar to `npm install` with a few key differences: it'll follow the `package-lock.json` exactly (where `npm install` will ignore it and update it if newer patch versions of your dependencies are available) and it'll automatically delete `node_modules` if it exists. `npm ci` is made for situations like this.\n\nNow if you try to build again, it _may_ fail with permissions issues. Why? Well, when you have `WORKDIR` create a directory, it does so as root (depending on which version of Docker you're using) which means that the node user won't have enough permissions to modify that directory. We could either use `RUN` to change the user or we could use `RUN` to make the directory in the first place as node. Let's do the latter.\n\nGenerally it's encouraged to not rely on `WORKDIR` to get it right and just do it yourself.\n\n```dockerfile\nFROM node:20\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node . 
.\n\nRUN npm ci\n\nCMD [\"node\", \"index.js\"]\n```\n\n[āļø Link to the Dockerfile][dockerfile-file]\n\n```bash\ndocker build -t more-complicated-app .\ndocker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app\n```\n\nNow try building and running your container. It should work now! Yay!\n\n> **NOTE:** Make sure you don't bind your app to host `localhost` (like if you put `localhost` instead of `0.0.0.0` in the host in our Fastify app.) This will make it so the app is only available _inside_ the container. If you see `connection reset` instead of when you're expecting a response, this a good candidate for what's happening (because this definitely didn't _just_ happen to me š.) You need to have the `host: \"0.0.0.0\"` in your Node.js app\n\n[node-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/index.js\n[package-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/package.json\n[dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/Dockerfile\n[fastify]: https://fastify.dev/\n","slug":"build-a-more-complicated-nodejs-app","title":"Build a More Complicated Node.js App","section":"Dockerfiles","icon":"file-lines","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/04-dockerfiles/C-build-a-more-complicated-nodejs-app.md","nextSlug":"/lessons/dockerfiles/a-note-on-expose","prevSlug":"/lessons/dockerfiles/build-a-nodejs-app"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/build-a-nodejs-app.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/build-a-nodejs-app.json new file mode 100644 index 0000000..7060e13 --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/dockerfiles/build-a-nodejs-app.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"Build a Node.js App","description":"Learn how to set up a basic Node.js application inside Docker with detailed steps on copying files, exposing ports, and user permissions. Enhance your Dockerfile skills by optimizing file structures and using instructions like COPY, USER, and WORKDIR effectively.","keywords":["Dockerfile","Node.js application","Docker setup","copy files in Docker","expose ports in Docker","user permissions in Docker","WORKDIR instruction"]},"html":"NOTE: Make sure you don't bind your app to host
\nlocalhost
(like if you put localhost
instead of 0.0.0.0
in the host in our Fastify app.) This will make it so the app is only available inside the container. If you see connection reset
when you're expecting a response, this is a good candidate for what's happening (because this definitely didn't just happen to me.) You need to have the host: "0.0.0.0"
in your Node.js app
So now let's dig into some more advanced things you can do with a Dockerfile. Let's first make our project a real Node.js application. Make a file called index.js
and put this in there.
const http = require("http");\n\nhttp\n .createServer(function (request, response) {\n console.log("request received");\n response.end("omg hi", "utf-8");\n })\n .listen(3000);\nconsole.log("server started");\n
\nThis is more-or-less the most barebones Node.js app you can write. It just responds to HTTP traffic on port 3000. Go ahead and try running it on your local computer (outside of Docker) by running node index.js
. Open localhost:3000 in your browser to give it a shot.
Okay, so let's get this running inside Docker now. First thing is we have to copy this file from your local file system into the container. We'll use a new instruction, COPY
. Modify your Dockerfile to say:
FROM node:20\n\nCOPY index.js index.js\n\nCMD ["node", "index.js"]\n
\nThis will copy your index.js file from your file system into the Docker file system (the first index.js is the source and the second index.js is the destination of that file inside the container.)
\nWe then modified the CMD
to start the server when we finally do run the container. Now run
docker build -t my-node-app .\ndocker run --name my-app --rm my-node-app\n
You might need to open another terminal and type docker kill my-app
.
Now your Node.js app is running inside of a container managed by Docker! Hooray! But one problem: how do we access it? If you open localhost:3000 now, it doesn't work! We have to tell Docker to expose the port. So let's do that now. Stop your container from running and run it again like this.
\nTry stopping your server now. Your normal CTRL+C won't work. Node.js doesn't handle SIGINT (which is what CTRL+C sends) on its own. Instead you either have to handle it yourself inside of your Node.js code (preferable for real apps) or you can tell Docker to handle it with the --init
flag. This uses a package called tini to handle shutdown signals for you.
docker run --init --publish 3000:3000 --rm my-node-app # or you can use -p instead of --publish\n
The --publish
flag allows you to forward a port out of the container to the host computer. In this case we're forwarding port 3000
(which is what the Node.js server was listening on) to port 3000
on the host machine. The first 3000
represents the port on the host machine and the second 3000
represents what port is being used in the container. If you did docker run --publish 8000:3000 my-node-app
, you'd open localhost:8000
to see the server (running on port 3000
inside the container).
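For example, a quick variation you can try yourself (reusing the my-node-app image built above; the host port 8000 is just an arbitrary choice):

```bash
# host port 8000 -> container port 3000
docker run --init --rm --publish 8000:3000 my-node-app
# then, from the host
curl localhost:8000
```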
Next, let's organize ourselves a bit better. Right now we're putting our app into the root directory of our container and running it as the root user. This is both messy and unsafe. If there's an exploit for Node.js that gets released, it means that whoever uses that exploit on our Node.js server will be doing so as root, which means they can do whatever they want. Ungood. So let's fix that. We'll put the app inside the home directory of a different user.
\nFROM node:20\n\nUSER node\n\nCOPY index.js /home/node/code/index.js\n\nCMD ["node", "/home/node/code/index.js"]\n
The USER
instruction lets us switch from being the root user to a different user, one called "node" which the node:20
image has already made for us. We could make our own user too using bash commands but let's just use the one the node image gave us. (More or less you'd run RUN useradd -ms /bin/bash lolcat
to add a lolcat user.)
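If you did want to roll your own user, a minimal sketch might look like this (the lolcat name is just the example from above; node:20 is Debian-based, so useradd is available):

```dockerfile
FROM node:20

# create a "lolcat" user with a home directory and bash shell, then switch to it
RUN useradd -ms /bin/bash lolcat
USER lolcat

# prints "lolcat", confirming the default user changed
CMD ["whoami"]
```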
Notice we're now copying inside of the user's home directory. This is because they'll have proper permissions to interact with those files whereas they may not if we were outside of their home directory. You'll save yourself a lot of permission wrangling if you put it in a home directory. But we'll have to add a flag to the COPY
command to make sure the user owns those files. We'll do that with --chown=node:node
where the first node
is the user and the second node
is the user group.
It's no big deal that the "code" directory doesn't exist, COPY
will create it.
The two commands COPY
and ADD
do very similar things with a few key differences. ADD
can also accept, in addition to local files, URLs to download things off the Internet and it will also automatically unzip any tar files it downloads or adds. COPY
will just copy local files. Use COPY
unless you need to unzip something or are downloading something.
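As a hedged illustration of the difference (the URL and archive name are placeholders, not real project files — swap in something real if you want to build it):

```dockerfile
FROM node:20

# ADD can fetch a remote file; it also auto-extracts *local* tar archives it adds
ADD https://example.com/some-release.tar.gz /tmp/some-release.tar.gz

# COPY only copies local files and directories into the image
COPY index.js /home/node/code/index.js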
Great. Let's make everything a bit more succinct by setting a working directory
\nFROM node:20\n\nUSER node\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node index.js .\n\nCMD ["node", "index.js"]\n
\nWORKDIR
works as if you had cd
'd into that directory, so now all paths are relative to that. And again, if it doesn't exist, it will create it for you.
Now we just tell COPY
to copy the file into the same directory. Now that we're giving it a directory instead of a file name, it'll just assume we want the same name. You could rename it here if you wanted.
So far we've been focusing a lot on running containers and haven't much dug into building them. This is on purpose because most of the benefit of containers for developers comes from the running of containers. If you learn one thing, it should be how to run them. In fact I'll even venture to say that most developers really only ever need to know how to run them. But you, you're going to learn how to write them. It's an extra superpower.
\nThat said, let's learn to build our own containers. We'll again be using Docker for this though there are other ways to do this. Docker has a special file called a Dockerfile
which allows you to outline how a container will be built. Each line in a Dockerfile is a new directive for how to change your Docker container.
A big key with Docker containers is that they're supposed to be disposable. You should be able to create them and throw them away as many times as necessary. In other words: adopt a mindset of making everything short-lived. There are other, better tools for long-running, custom containers.
\nThe (imperfect) analogy that people use sometimes is your containers should be "cattle, not pets". You design containers so you can easily create and destroy them as much as necessary. The analogy here is that you name your pets and take special care of them whereas you have a thousand cattle and can't name or take special care of them, just the herd.
\nLet's make the most basic Dockerfile ever. Let's make a new folder, maybe on your desktop. Put a file in there called Dockerfile
(no extension.) In your file, put this.
FROM node:20\n\nCMD ["node", "-e", "console.log(\\"hi lol\\")"]\n
\nThe first thing on each line (FROM
and CMD
in this case) are called instructions. They don't technically have to be all caps but it's convention to do so, so that the file is easier to read. Each one of these instructions incrementally changes the container from the state it was in previously, adding what we call a layer.
Let's go ahead and build our container. Run (from inside of the directory of where your Dockerfile is)
\ndocker build .\n
You should see it output a bunch of stuff and it'll leave you with the hash of an image. After each instruction, you'll see a hash similar to the ones we've been using for the IDs for the containers. You know why that is? It's because each one of those layers is in-and-of itself a valid container image! This ends up being important later and we'll discuss it in a bit.
\nOur container has two instructions in its Dockerfile, but actually it has many, many more. How? The first instruction, FROM node:20
actually means start with the node
container. That container itself comes from another Dockerfile which builds its own container, which itself comes from another Dockerfile, which ultimately comes from the Debian image.
This is something very powerful about Docker: you can use images to build other images and build on the work of others. Instead of having to worry about how to install Debian and all the necessary items to build Node.js from its source, we can just start with a well-put-together image from the community.
\nOkay, so we start with node:20
and then we add the CMD
instruction. There will only ever be one of these in effect in a Dockerfile. If you have multiple it'll just take the last one. This is what you want Docker to do when someone runs the container. In our case, we're running node -e "console.log('hi lol')"
from within the container. node -e
, if you don't know, will run whatever is inside of the quotes with Node.js. In this case, we're logging out hi lol
to the console.
You can put CMD node -e "console.log('hi lol')"
as that last line and it'll work but it's not the preferred way of doing it. This won't actually go through bash which itself is simpler and usually safer. I do it this way because the docs strongly encourage you to do it this way.
So, in essence, our container nabs a node:20
container and then has it execute a node
command when you run it. Let's try it. Grab the hash from your build and run
docker run <ID>\n
It's a little inconvenient to always have to refer to it by ID, it'd be easier if it had a name. So let's do that! Try
\ndocker build . --tag my-node-app ## or -t instead of --tag\ndocker run my-node-app\n
Much easier to remember the name rather than a hash. If you want to version it yourself, you can totally do this:
\ndocker build -t my-node-app:1 .\ndocker run my-node-app:1\n
Now change your Dockerfile
so that it logs out wat
instead of hi lol
. After you do that.
docker build -t my-node-app:2 .\ndocker run my-node-app:2\ndocker run my-node-app:1\n
You can version your containers and hold on to older ones, just in case!
\n","markdown":"\nSo far we've been focusing a lot on running containers and haven't much dug into building them. This is on purpose because most of benefit of containers for developers comes from the running of containers. If you learn one thing, it should be how to run them. In fact I'll event venture to say that _most_ developers really only ever need to know how to run them. But you, you're going to learn how to write them. It's an extra superpower.\n\nThat said, let's learn to build our own containers. We'll again be using Docker for this though there are other ways to do this. Docker has a special file called a `Dockerfile` which allows you to outline how a container will be built. Each line in a Docker file is a new a directive of how to change your Docker container.\n\nA _big key_ with Docker container is that they're supposed to be disposable. You should be able to create them and throw them away as many times as necessary. In other words: adopt a mindset of making everything short-lived. There are other, better tools for long-running, custom containers.\n\nThe (imperfect) analogy that people use sometimes is your containers should be \"[cattle, not pets][cattle]\". You design containers so you can easily create and destroy them as much as necessary. The analogy here is that you name your pets and take special care of them whereas you have a thousand cattle and can't name or take special care of them, just the herd.\n\nLet's make the most basic Dockerfile ever. Let's make a new folder, maybe on your desktop. Put a file in there called `Dockerfile` (no extension.) In your file, put this.\n\n## The most basic Dockerfile-based Container\n\n```dockerfile\nFROM node:20\n\nCMD [\"node\", \"-e\", \"console.log(\\\"hi lol\\\")\"]\n```\n\n[āļø Link to the Dockerfile][dockerfile]\n\nThe first thing on each line (`FROM` and `CMD` in this case) are called _instructions_. They don't technically have to be all caps but it's convention to do so so that the file is easier to read. Each one of these instruction incrementally changes the container from the state it was in previously, adding what we call a _layer_.\n\nLet's go ahead and build our container. Run (from inside of the directory of where your Dockerfile is)\n\n```bash\ndocker build .\n```\n\nYou should see it out put a bunch of stuff and it'll leave you with the hash of an image. After each instruction, you'll see a hash similar to the ones we've been using for the IDs for the containers. You know why that is? It's because each one of those layers is in-and-of themselves a valid container image! This ends up being important later and we'll discuss it in a bit.\n\nOur container has two instructions in its Dockerfile, but actually it has many, many more. How? The first instruction, `FROM node:20` actually means _start_ with the `node` container. That container itself [comes from another Dockerfile][docker-node] which build its own container, which itself [comes from another Dockerfile][buildpack], which comes ultimately from the [Debian][debian] image.\n\nThis is something very powerful about Docker: you can use images to build other images and build on the work of others. Instead of having to worry about how to install Debian and all the necessary items to build Node.js from its source, we can just start with a well-put-together image from the community.\n\nOkay, so we start with `node:20` and then we add the `CMD` instruction. There will only ever be one of these in effect in a Dockerfile. 
If you have multiple it'll just take the last one. This is what you want Docker to do when someone runs the container. In our case, we're running `node -e \"console.log('hi lol')\"` from within the container. `node -e`, if you don't know, will run whatever is inside of the quotes with Node.js. In this case, we're logging out `hi lol` to the console.\n\nYou _can_ put `CMD node -e \"console.log('hi lol')\"` as that last line and it'll work but it's not the preferred way of doing it. This won't actually go through bash which itself is simpler and usually safer. I do it this way because the docs strongly encourage you to do it this way.\n\nSo, in essence, our containers nabs a `node:20` container and then when we have it execute a `node` command when you run it. Let's try it. Grab the hash from your build and run\n\n```bash\ndocker runGo make any change to your Node.js app. Now re-run your build process. Docker is smart enough to see the your FROM
, RUN
, and WORKDIR
instructions haven't changed and wouldn't change if you ran them again, so it uses the same layers it cached from the previous build, but it can see that your COPY
is different since files changed between last time and this time, so it begins the build process there and re-runs all instructions after that. Pretty smart, right? This is the same mechanism that Docker uses when you pull a new container to download it in pieces. Each one of those corresponds to a layer.
So which part of container-building takes the longest? RUN npm ci
. Anything that has to hit the network is going to take the longest, without a doubt. The shame is that our package.json
hasn't changed since the previous iteration; we just changed something in our index.js
. So how do we make it so we only re-run our npm ci
when package.json changes? Break it into two COPY
instructions!
FROM node:20\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node package-lock.json package.json ./\n\nRUN npm ci\n\nCOPY --chown=node:node . .\n\nCMD ["node", "index.js"]\n
\ndocker build -t layers .\ndocker run -it -p 8080:8080 --name my-app --rm --init layers\n
The first COPY
pulls just the package.json
and the package-lock.json
which is just enough to do the npm ci
. After that we nab the rest of the files. Now if you change your source files (but not your dependencies) you avoid re-running the full npm ci. This is useful and recommended for any dependency installation: apt-get, pip, cargo, gems, etc. as well as any long-running command like building something from source.
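For example, here's a sketch of the same trick for a hypothetical Python app (the file names are assumptions for illustration) — the dependency manifest gets its own COPY so the install layer stays cached:

```dockerfile
FROM python:3.12
WORKDIR /app

# copy only the dependency manifest first so this layer is reused until requirements.txt changes
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# now copy the rest of the source; edits here won't invalidate the pip install layer
COPY . .

CMD ["python", "main.py"]
```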
We've now built a nice little container for our Node.js app and we absolutely could ship it as-is to production. However there are a few things we can do to make things even faster, cheaper, and more secure.
\nMaking your containers smaller is a good thing for a few reasons. For one, everything tends to get a bit cheaper. Moving containers across the Internet takes time and bits to do. If you can make those containers smaller, things will go faster and you'll require less space on your servers. Often private container registries (like personal Docker Hubs, Azure Container Registry is a good example) charge you by how much storage you're using.
\nBeyond that, having fewer things in your container means you're less susceptible to bugs. Let's say there's a Python exploit that's going around that allows hackers to get root access to your container. If you don't have Python in your container, you're not vulnerable! And obviously if you do have Python installed (even if you're not using it) you're vulnerable. So let's see how to make your container a bit smaller.
\nIn your previous Dockerfile, change the first line (FROM
)
FROM node:20-alpine\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node package-lock.json package.json ./\n\nRUN npm ci\n\nCOPY --chown=node:node . .\n\nCMD ["node", "index.js"]\n
\nOur image size (by comparing the "Size"
field in docker inspect my-app
) went from 1.1GB to 150MB just like that. We shed quite a bit of cruft that we didn't need in Debian and we didn't even need to change anything in our Dockerfile. Honestly, that's unusual. When you strip everything out, typically you'll have to go back and add some of it back in. But in this case we're golden!
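If you want to check the numbers on your own machine, a quick way (substitute whatever image tags you've actually built or pulled; sizes are reported in bytes) is:

```bash
# print just the size of an image
docker image inspect -f '{{ .Size }}' node:20
docker image inspect -f '{{ .Size }}' node:20-alpine
# or list everything with human-readable sizes
docker images
```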
Alpine, if you remember, is a bare bones alternative to Debian. It's built on Busybox Linux which is a 2MB distro of Linux (Alpine is 5MB.) node:20-alpine
itself is about 133MB
and node:latest
is about 1.0GB.
When should you select Alpine? My general feeling (this is a Brian Holt opinion, not a community one so take it with a grain of salt) is that the "end destination" container is where Alpine is most useful. It cuts all cruft out which is super helpful for end-deployment sorts of scenarios due to security and size but it also can be annoying for development scenarios because it lacks just about everything necessary for those, making you have to hand-install everything you need. In these "middle scenarios" where it's not really the destination and the container is just another tool in your development system (whether that's a multi-stage build or a development container) I'll reach for Ubuntu or Debian.
\n","markdown":"\nWe've now built a nice little container for our Node.js app and we absolutely could ship it as-is to production. However there's a few things we can do to make things even faster, cheaper, and more secure.\n\n## Making your containers smaller\n\nMaking your containers smaller is a good thing for a few reasons. For one, everything tends to get a bit cheaper. Moving containers across the Internet takes time and bits to do. If you can make those containers smaller, things will go faster and you'll require less space on your servers. Often private container registries (like personal Docker Hubs, Azure Container Registry is a good example) charge you by how much storage you're using.\n\nBeyond that, having less _things_ in your container means you're less susceptible to bugs. Let's say there's a Python exploit that's going around that allows hackers to get root access to your container. If you don't have Python in your container, you're not vulnerable! And obviously if you do have Python installed (even if you're not using it) you're vulnerable. So let's see how to make your container a bit smaller.\n\nIn your previous Dockerfile, change the first line (`FROM`)\n\n```dockerfile\nFROM node:20-alpine\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node package-lock.json package.json ./\n\nRUN npm ci\n\nCOPY --chown=node:node . .\n\nCMD [\"node\", \"index.js\"]\n```\n\n[āļø Link to the Dockerfile][dockerfile-file]\n\nOur image size (by comparing the `\"Size\"` field in in `docker inspect my-app`) from 1.1GB to 150MB just like that. We shed quite a bit of cruft that we didn't need in Debian and we didn't even need to change anything in our Dockerfile. Honestly, that's unusual. When you strip _everything_ out typically you'll have to go back and add some of them back in. But in this case we're golden!\n\nAlpine, if you remember, is a bare bones alternative to Debian. It's built on Busybox Linux which is a 2MB distro of Linux (Alpine is 5MB.) `node:20-alpine` itself is about `133MB` and `node:latest` is about 1.0GB.\n\nWhen should you select Alpine? My general feeling (this is a Brian Holt opinion, not a community one so take it with a grain of salt) is that the \"end destination\" container is where Alpine is most useful. It cuts all cruft out which is super helpful for end-deployment sorts of scenarios due to security and size but it also can be annoying for development scenarios because it lacks just about everything necessary for those, making you have to hand install everything you need. 
In these \"middle scenarios\" where it's not really the destination and the container is just another tool in your development system (where that's a multi stage build or a development container) I'll reach for Ubuntu or Debian.\n\n[dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/alpine-linux/Dockerfile\n","slug":"alpine-linux","title":"Alpine Linux","section":"Making Tiny Containers","icon":"minimize","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/05-making-tiny-containers/A-alpine-linux.md","nextSlug":"/lessons/making-tiny-containers/making-our-own-alpine-nodejs-container","prevSlug":"/lessons/dockerfiles/layers"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/distroless.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/distroless.json new file mode 100644 index 0000000..2af9f44 --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/distroless.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn about the differences between Alpine and Distroless for Docker containers, focusing on edge cases with Alpine and the stripped-down nature of Distroless. Explore alternative options like Wolfi, Red Hat's Universal Base Image Micro, and Google's Distroless projects, emphasizing security and minimalism.","keywords":["Alpine","Distroless","Docker containers","security","minimalism"]},"html":"You may not want to use Alpine. This blog post goes into depth but let me sum it up with two points:
\nglibc
with musl
. Read the blog post if you want to know more. Suffice to say, unless you're running Kubernetes at a large scale this shouldn't concern you; lots of people run Alpine and never see issues.The four projects to look to here, Wolfi (an open source project), Red Hat's Universal Base Image Micro, Debian's slim variant, and Google's Distroless.
\nYou would be set with any of these. We are going to focus on Distroless because it is currently the most popular but feel free to experiment!
\n"Distroless" is a bit of a lie as it still based on Debian, but to their point, they've stripped away essentially everything except what is 100% necessary to run your containers. This means you need to install everything you need to get running. It means no package manager. It means it is truly as barebones as it can get.
\nLet's build a Node.js distroless image.
\n# build stage\nFROM node:20 AS node-builder\nWORKDIR /build\nCOPY package-lock.json package.json ./\nRUN npm ci\nCOPY . .\n\n# runtime stage\nFROM gcr.io/distroless/nodejs20\nCOPY --from=node-builder --chown=node:node /build /app\nWORKDIR /app\nCMD ["index.js"]\n
\ndocker build -t my-distroless .\ndocker run -it -p 8080:8080 --name my-app --rm --init my-distroless\n
The size (according to my computer) was about 175MB, so not necessarily any smaller than Alpine, but it is indeed using a Debian-derivative Linux instead of Alpine which does exclude a class of rare-but-possible bugs! These days I tend to use Distroless images but honestly I'm fine with anything you choose here. Probably by the time you need something other than an Alpine image you will have 100x surpassed my knowledge and skills with containers or have a whole dev ops org to attend to these nuances.
\nOne note with the Dockerfile: notice we just give it the Node.js file and not the Node.js command. The Distroless container locks it down so it can only run Node.js apps and cannot be given any other command. Just another way they are hyper-focused for security in production.
\n","markdown":"\nYou may not want to use Alpine. [This blog post goes into depth][blog] but let me sum it up with two points:\n\n1. Alpine made some design choices that have some extremely rare edge cases that can cause failures and be _very_ hard to diagnose. This arises from their choice of replacing the typical `glibc` with `musl`. Read the blog post if you want to know more. Suffice to say, unless you're running Kubernetes at a large scale this shouldn't concern you; lots of people run Alpine and never see issues.\n1. Now Alpine isn't the only option!\n\nThe four projects to look to here, [Wolfi (an open source project)][wolfi], [Red Hat's Universal Base Image Micro][ubi], [Debian's slim variant][slim], and [Google's Distroless][distroless].\n\nYou would be set with any of these. We are going to focus on Distroless because it is currently the most popular but feel free to experiment!\n\n\"Distroless\" is a bit of a lie as it still based on Debian, but to their point, they've stripped away essentially everything except what is 100% necessary to run your containers. This means you need to install _everything_ you need to get running. It means no package manager. It means it is truly as barebones as it can get.\n\nLet's build a Node.js distroless image.\n\n```dockerfile\n# build stage\nFROM node:20 AS node-builder\nWORKDIR /build\nCOPY package-lock.json package.json ./\nRUN npm ci\nCOPY . .\n\n# runtime stage\nFROM gcr.io/distroless/nodejs20\nCOPY --from=node-builder --chown=node:node /build /app\nWORKDIR /app\nCMD [\"index.js\"]\n```\n\n[āļø Link to the Dockerfile][dockerfile-file]\n\n```bash\ndocker build -t my-distroless .\ndocker run -it -p 8080:8080 --name my-app --rm --init my-distroless\n```\n\nThe size (according to my computer) was about 175MB, so not necessarily any smaller than Alpine, but it is indeed using a Debian-derivative Linux instead of Alpine which does exclude a class of rare-but-possible bugs! These days I tend to use Distroless images but honestly I'm fine with anything you choose here. Probably by the time you _need_ something other than an Alpine image you will have 100x surpassed my knowledge and skills with containers or have a whole dev ops org to attend to these nuances.\n\nOne note with the Dockerfile: notice we _just_ give it the Node.js file and _not_ the Node.js command. The Distroless container locks it down so it can only run Node.js apps and cannot be given any other command. 
Just another way they are hyper-focused for security in production.\n\n[blog]: https://martinheinz.dev/blog/92\n[wolfi]: https://wolfi.dev\n[distroless]: https://github.com/GoogleContainerTools/distroless\n[ubi]: https://catalog.redhat.com/software/base-images\n[node-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/distroless/index.js\n[dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/distroless/Dockerfile\n[slim]: https://hub.docker.com/_/debian\n","slug":"distroless","title":"Distroless","section":"Making Tiny Containers","icon":"minimize","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/05-making-tiny-containers/D-distroless.md","nextSlug":"/lessons/making-tiny-containers/static-asset-project","prevSlug":"/lessons/making-tiny-containers/multi-stage-builds"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/making-our-own-alpine-nodejs-container.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/making-our-own-alpine-nodejs-container.json new file mode 100644 index 0000000..728f0cf --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/making-tiny-containers/making-our-own-alpine-nodejs-container.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"Making Our Own Alpine Node.js Container","description":"Learn how to create a custom Node.js Alpine container by installing system dependencies and setting up a minimal Linux container with Node.js and npm. Explore steps to optimize the container size and user setup, mirroring practices from official containers.","keywords":["Node.js Alpine container","Dockerfile tutorial","system dependencies installation","Alpine Linux setup","custom container optimization"]},"html":"Let's take this exercise a bit further. Let's actually make our own Node.js Alpine container. NOTE: I'd suggest always using the official one. They'll keep it up to date with security fixes and they're real good at making containers. Better than I am, anyway. But this is a good exercise for us to go through to learn how to install system dependencies.
\nStart with this in a new Dockerfile.
\nFROM alpine:3.19\n\nRUN apk add --update nodejs npm\n
alpine:latest
would nab you the latest Alpine (3.19 as of writing, if you run into issues with versions, continue with alpine:3.19
instead of alpine:latest
. Otherwise feel free to truck on with alpine:latest
)
RUN apk add --update nodejs npm
will use the Alpine package manager to grab Node.js and npm (they're bundled separately for Alpine.)
docker build -t my-node .\n
If you encounter error like this
\n/home/node/code/node_modules/@hapi/hapi/lib/core.js:51\n actives = new WeakMap(); // Active requests being processed\n ^\n\nSyntaxError: Unexpected token =\n
Try using nodejs-current
instead of nodejs
RUN apk add --update nodejs-current npm\n
Okay so now if you do docker build -t my-node .
. Now try docker run --rm --name my-app -it my-node
. In here you should have a pretty bare bones Linux container but both node -v
and npm -v
should work. I checked and already my container is 72MB.
Keep in mind that Alpine does not use bash for its shell; it uses a different shell called ash
or often just sh
. It's similar enough to bash but there are some differences. It's not really the point of this class so we'll keep the focus on learning just what's necessary.
Let's next make our node
user.
FROM alpine:3.19\n\nRUN apk add --update nodejs npm\n\nRUN addgroup -S node && adduser -S node -G node\n\nUSER node\n
I'm mimicking what the Node.js official container does, which is make a user group of node
with one user in it, node
. Feel free to name them different things if you feel so inclined. Notice we could conceivably combine the two RUN
instructions together but it's generally best practices to keep "ideas" separate. The first RUN
installs dependencies, the second one creates the node
user. Up to you how you do it, neither is wrong per se.
Now we can just copy the rest from the previous Dockerfile! Let's do that.
\nFROM alpine:3.19\n\nRUN apk add --update nodejs npm\n\nRUN addgroup -S node && adduser -S node -G node\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node package-lock.json package.json ./\n\nRUN npm ci\n\nCOPY --chown=node:node . .\n\nCMD ["node", "index.js"]\n
\nIt works! We're down to 89MB (compared to 150MB-ish with the official node:20-alpine
container). Honestly, I'm not entirely sure what we cut out from the other node:20-alpine
container but it's probably important. Again, I'd stick to the official containers where they exist. But hey, we learned how to add a user and install system dependencies! Let's make it even small because why the hell not.
Hey, we're already half-way to ridiculous, let's make our image EVEN SMALLER. Technically we only need npm
to build our app, right? We don't actually need it to run our app. Docker allows you to have what are called multistage builds, where it uses one container to build your app and another to run it. This can be useful if you have big dependencies to build your app but you don't need those dependencies to actually run the app. A C++ or Rust app might be a good example of that: they need big tool chains to compile the apps but the resulting binaries are smaller and don't need those tools to actually run them. Or one perhaps more applicable to you is that you don't need the TypeScript or Sass compiler in production, just the compiled files. We'll actually do that here in a sec, but let's start here with eliminating npm
.
Make a new Dockerfile, call it Dockerfile
.
# build stage\nFROM node:20 AS node-builder\nRUN mkdir /build\nWORKDIR /build\nCOPY package-lock.json package.json ./\nRUN npm ci\nCOPY . .\n\n# runtime stage\nFROM alpine:3.19\nRUN apk add --update nodejs\nRUN addgroup -S node && adduser -S node -G node\nUSER node\nRUN mkdir /home/node/code\nWORKDIR /home/node/code\nCOPY --from=node-builder --chown=node:node /build .\nCMD ["node", "index.js"]\n
Notice we now have two FROM
instructions. This is how you can tell it's multistage. The last container made will be the final one that gets labeled and shipped. Notice we're starting in the full node:20
container since we're not going to ship this container so we can use the kitchen sink to build it before copying it to a smaller container.
After building everything in the build stage (you can have more than two stages by the way) we move on to the runtime container. In this one we're using Alpine due to its size and security benefits. Everything else looks similar to what we were doing before, just now we're going to be copying from the build container instead of the host machine.
\nThe two real key differences are that we don't apk add npm
and we're doing COPY --from=my-node
which means we're copying from the first stage. We do FROM node:20 AS node-builder
so we can refer to node-builder by name which simplifies reading the Dockerfile.
As you may imagine, this means you can copy from any previous stage or if you leave --from
off it'll come from the host machine.
So try it now!
\ndocker build -t my-multi .\ndocker run -it -p 8080:8080 --name my-app --rm --init my-multi\n
Still works! And our container size is down to a cool 72MB as compared to 89MB when we included npm, 150MB when we used node:20-alpine
and 1.1GB when we used node:20
.
Pretty amazing, right? Honestly, how worthwhile is it to do micro-optimizations like this? Not very. We had to do a decent amount to shave 50% off the final size and now we're stuck maintaining it. I'd rather just start with FROM node:20-alpine
and call it a day. We get all their wisdom for free and we're not stuck with a longer Dockerfile than we need. But it is definitely worth going from 1.1GB to 150MB!
A last note here: file size isn't everything. It's at best weakly correlated with security, it's just a fun metric to measure. In theory you'll save some money on bandwidth but I have to guess you'll spend more engineering salaries making containers tiny than you'll save on bandwidth. I'd much rather have node:20
and have it be maintained by security professionals than trying to do it myself. Just keep that in mind: it can be a fool's errand to chase shaving bytes off your containers.
We're going to do a project now! Feel free to attempt the project first and then follow along with me as I code the answer.
\nWe're going to construct a very basic front end website with Astro, React, TypeScript, and Tailwind. Why these? Because I want it to have a lot of dependencies and a big build step. This class isn't about any of these things but if you want to take a class on React, my intro and intermediate classes are available on Frontend Masters.
\nYou have two choices here: you can either create your own Astro project with npx create-astro@latest
or you can just use my copy of it. I added Tailwind and React to mine but you don't necessarily need to as it doesn't really affect building the project.
Also feel free to use your own static asset project or favorite static assets framework. As long as npm run build
works and you make sure to get the path right for where the assets are to be served from, it doesn't matter.
\n\nDo note I have the complete Dockerfile in there under
\nsolution.Dockerfile
. Only glance at it once you've tried to build it yourself.
You should have your project ready to go now.
\nTo make sure this works right now, run npm run dev
in your console and make sure the app starts okay. You should see a splash screen. Once you're ready to build it, run npm run build
to have it build for production.
The project is to make a multi-stage Dockerfile that builds the project in one container and then serves it from a different container using NGINX. If you're not familiar with NGINX, fear not! It is a static file server, which is to say it takes HTML, CSS, JS, images, fonts, etc. and serves them to your users. It handles all the serving and file headers for you. Using it can be accomplished in a few steps. You'll use the nginx:latest
(or nginx:alpine
! up to you) container and copy just the newly built files, not everything (which is in the dist
directory inside of the Astro app) to /usr/share/nginx/html
and the nginx
will take care of the rest. The nginx
container defines CMD
in it and if you don't override it, it starts NGINX for you. Give it a shot! Once you've tried, come back here and we'll do the solution together.
\n\nNGINX runs on port 80 by default, so you probably want to route that to something like 8080 on your host machine (otherwise you have to run it as root, which no one wants to do.) In other words, use
\n-p 8080:80
when you start Docker.
Scroll down to see my answer.
\n\n\nDone? If you gave it a shot, your Dockerfile probably shouldn't be very long. Let's see what I came up with
\nFROM node:20 AS node-builder\nWORKDIR /app\nCOPY . .\nRUN npm ci\nRUN npm run build\n\n# you could totally use nginx:alpine here too\nFROM nginx:latest\nCOPY --from=node-builder /app/dist /usr/share/nginx/html\n
Now if you run this, it should work:
\ndocker build -t my-static-app .\ndocker run -it -p 8080:80 --name my-app --rm --init my-static-app\n
It should be working now! Hooray! Hopefully you're starting to see the power of what Docker can unlock for you.
\n","markdown":"\nWe're going to do a project now! Feel free to attempt the project first and then follow along with me as I code the answer.\n\nWe're going to construct a very basic front end website with Astro, React, TypeScript, and Tailwind. Why these? Because I want it to have a lot of dependencies and a big build step. This class isn't about any of these things but if you want to take a class on React, my [intro][intro] and [intermediate][intermediate] classes are available on Frontend Masters.\n\nYou have two choices here: you can either create your own Astro project with `npx create-astro@latest` or you can just use my copy of it. I added Tailwind and React to mine but you don't necessarily need to as it doesn't really affect building the project.\n\nAlso feel free to use your own static asset project or favorite static assets framework. As long as `npm run build` works and you make sure to get the path right for where the assets are to be served from, it doesn't matter.\n\n[āļø Link to the Project][project]\n\n> Do note I have the complete Dockerfile in there under `solution.Dockerfile`. Only glance at it once you've tried to build it yourself.\n\nYou should have your project ready to go now.\n\nTo make sure this works right now, run `npm run dev` in your console and make sure the app starts okay. You should see a splash screen. Once you're ready to build it, run `npm run build` to have it build for production.\n\nThe project is to make a multi-stage Dockerfile that build the project in one container and then serves it from a different container using NGINX. If you're not familiar with NGINX, fear not! It is a static file server, which is to say it take takes HTML, CSS, JS, images, fonts, etc. and serves them to your users. It handles all the serving and file headers for you. Using it can be accomplished in few steps. You'll use the `nginx:latest` (or `nginx:alpine`! up to you) container and copy **just the newly built files, not everything** (which is in the `dist` directory inside of the Astro app) to `/usr/share/nginx/html` and the `nginx` will take care of the rest. The `nginx` container defines `CMD` in it and if you don't override it, it starts NGINX for you. Give it a shot! Once you've tried, come back here and we'll do the solution together.\n\n> NGINX runs on port 80 by default, so you probably want to route that something like 8080 on your host machine (otherwise you have to run it as root which no one wants to do.) In other words, use `-p 8080:80` when you start Docker.\n\nScroll down to see my answer.\n\n\n\nDone? If you gave it a shot, your Dockerfile probably shouldn't very long. Let's see what I came up with\n\n```Dockerfile\nFROM node:20 AS node-builder\nWORKDIR /app\nCOPY . .\nRUN npm ci\nRUN npm run build\n\n# you could totally use nginx:alpine here too\nFROM nginx:latest\nCOPY --from=node-builder /app/dist /usr/share/nginx/html\n```\n\nNow if you run this, it should work:\n\n```bash\ndocker build -t my-static-app .\ndocker run -it -p 8080:80 --name my-app --rm --init my-static-app\n```\n\nIt should be working now! Hooray! 
Hopefully you're starting to see the power of what Docker can unlock for you.\n\n[intro]: https://frontendmasters.com/courses/complete-react-v8/\n[intermediate]: https://frontendmasters.com/courses/intermediate-react-v5/\n[project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/static-asset-project\n","slug":"static-asset-project","title":"Static Asset Project","section":"Making Tiny Containers","icon":"minimize","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/05-making-tiny-containers/E-static-asset-project.md","nextSlug":"/lessons/docker-features/bind-mounts","prevSlug":"/lessons/making-tiny-containers/distroless"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/docker-compose.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/docker-compose.json new file mode 100644 index 0000000..95510cb --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/multi-container-projects/docker-compose.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn how to use Docker Compose to coordinate multiple containers for development environments efficiently. Simplify defining relationships between containers using a YAML file, making it easy to manage complex setups with one command. Explore CI/CD integration possibilities and enhance development productivity by leveraging Docker Compose features.","keywords":["Docker Compose","multiple containers","development environments","CI/CD scenarios","YAML file","containers relationship","productivity"]},"html":"This may be one of the most useful features you learn about Docker. We've been mixing various different facets of deploying your app to production and creating development environments. This feature in particular is geared much more for development environments. Many times when you're developing containers you're not in just a single container environment (though that does happen too.) When this happens, you need to coordinate multiple containers when you're doing local dev and you've seen in the previous chapter, networking, that it's possible if a bit annoying.
\nWith Docker Compose we simplify this a lot. Docker Compose gives us the ability to coordinate multiple containers and do so with one YAML file. This is great if you're developing a Node.js app and it requires a database, caching, or even if you have two+ separate apps in two+ separate containers that depend on each other, or all of the above! Docker Compose makes it really simple to define the relationship between these containers and get them all running with one docker compose up
.
\n\nIf you see any commands out there with
\ndocker-compose
(key being the -
in there) it's from Docker Compose v1 which is not supported anymore. We are using Docker Compose v2 here. For our purposes there isn't much difference.
Do note that Docker does say that Docker Compose is suitable for production environments if you have a single instance running multiple containers. This is atypical for the most part: if you have multiple containers, typically you want the ability to have many instances.
\nIn addition to working very well for dev, Docker Compose is very useful in CI/CD scenarios when you want GitHub Actions or some CI/CD provider to spin up multiple environments to quickly run some tests.
\nOkay so let's get our previous app working: the one with a MongoDB database being connected to by a Node.js app. Create a new file in the root directory of your project called docker-compose.yml
and put this in there:
services:\n api:\n build: api\n ports:\n - "8080:8080"\n links:\n - db\n environment:\n MONGO_CONNECTION_STRING: mongodb://db:27017\n db:\n image: mongo:7\n web:\n build: web\n environment:\n API_URL: http://api:8080\n ports:\n - "8081:80"\n
This should feel familiar even if it's new to you. This is basically all of the CLI configuration we were giving to these containers, but captured in a YAML file.
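As a quick sanity check before running anything (optional, but something I find handy), you can have Compose parse the file and print the fully resolved configuration:

```bash
# validates the YAML and prints the resolved configuration (errors out if it's invalid)
docker compose config

# or just list the service names it found (api, db, web)
docker compose config --services
```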
\nIn services
we define the containers we need for this particular app. We have three: the api
container (which is our Node.js app), the web container (our front end, which we'll get to below), and the db
container which is MongoDB. We then identify where the Dockerfile is with build
, which ports to expose in ports
, and the environment
variables using that field.
The one interesting one here is the links
field. In this one we're saying that the api
container needs to be connected to the db
container. This means Docker will start this container first and then network it to the api
container. This works the same way as what we were doing in the previous lesson.
The db
container is pretty simple: it's just the mongo
container from Docker Hub. This is actually smart enough to expose 27017 as the port and to make a volume to keep the data around between restarts so we don't actually have to do anything for that. If you needed any other containers, you'd just put them here in services.
We then have a frontend React.js app that is being built by Parcel.js and served by NGINX. The only interesting thing is that we are serving API traffic to the api URL, which will go over the internal Docker Compose network.
\nThere's a lot more to compose files than what I've shown you here but I'll let you explore that on your own time. Click here to see the docs to see what else is possible.
\nThis will start and work now, just run docker compose up
and it'll get going. I just want to do one thing: let's make our app even more productive to develop on. Go to your Dockerfile for the app and make it read as such:
\n\nIf you change something and want to make sure it builds, make sure to run
\ndocker compose up --build
. Docker Compose isn't watching for changes when you run up.
FROM node:latest\n\nRUN npm i -g nodemon\n\nUSER node\n\nRUN mkdir /home/node/code\n\nWORKDIR /home/node/code\n\nCOPY --chown=node:node package-lock.json package.json ./\n\nRUN npm ci\n\nCOPY --chown=node:node . .\n\nCMD ["nodemon", "index.js"]\n
Now we can write our code and every time we save it'll restart the server from within the container. This will make it super productive to work with!
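For reference, here's a rough sketch of the day-to-day loop I tend to use with Compose while developing (the api service name comes from the compose file above):

```bash
# rebuild images and start everything in the foreground
docker compose up --build

# or run it in the background and follow just the api logs
docker compose up --build -d
docker compose logs -f api

# tear everything down (add -v to also remove volumes)
docker compose down
```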
\nWhile we're about to get to Kubernetes which will handle bigger deployment scenarios than Docker Compose can, you can use docker compose up --scale web=10
to scale up your web container to 10 concurrently running containers. This won't work at the moment because they're all trying to bind to the same port on the host, but we could use something like NGINX or HAProxy to load balance amongst the containers. It's a more advanced use case and less useful for Compose since at that point you should probably just use Kubernetes or something similar. We'll approach it in the Kubernetes chapter.
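As a sketch of what that scaling command looks like (hypothetical: you'd first have to drop the fixed host port mapping on whatever service you scale, or the copies will collide):

```bash
# run three replicas of the api service; Compose numbers the containers for you
docker compose up --scale api=3
```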
The next tool we're going to use is one called Kompose. I'm showing you this tool because it's how I start out with Kubernetes when I have a project that I want to use with it. Kompose converts a docker-compose.yml configuration to a Kubernetes configuration. I find this to be much more approachable than starting with the myriad configurations you need to get Kubernetes going.
\nClick here to see how to install Kompose on your platform. I did brew install kompose
with Homebrew.
So first let's modify our docker-compose.yml a bit to make it work for Kompose.
\nservices:\n api:\n build: api\n ports:\n - "8080:8080"\n links:\n - db\n depends_on:\n - db\n environment:\n MONGO_CONNECTION_STRING: mongodb://db:27017\n labels:\n kompose.service.type: nodeport\n kompose.image-pull-policy: Never\n db:\n image: mongo:7\n ports:\n - "27017:27017"\n web:\n build: web\n links:\n - api\n depends_on:\n - api\n labels:\n kompose.service.type: LoadBalancer\n kompose.service.expose: true\n kompose.image-pull-policy: Never\n ports:\n - "8081:80"\n
\n\n\nI went ahead here and modified the NGINX config to handle all inbound traffic. We could expose two services but in reality we want NGINX to be our front door and then allow our API to scale independently. I also modified the Node.js app to have correct paths relative to NGINX routes.
\n
We add the NodePort
type to the api service so that we can scale this part of our infra up and Kubernetes will make it bind to different ports. Any app can reach api on 8080 but we can have 50 scaled up instances that it's spreading across.
We add the LoadBalancer
label to web so that Kubernetes will know to expose this particular service to the outside world. What this actually does for you is spin up a load balancer that will distribute the load amongst all of your running pods. Do note that this is one of three ways to expose a service to the outside world (by default everything is only exposed internally). The other two are NodePort and using an ingress controller. This is a great explainer if you're curious. For now LoadBalancer is perfect. It's actually just a NodePort under the hood in Kubernetes, but once you deploy to GCP, AWS, or Azure they'll use their own flavor of load balancer for you. You can also handle this yourself but that's way outside the scope of this course.
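Once you've converted and applied the configs (which we'll do just below), you can see which type each service ended up with:

```bash
# TYPE shows ClusterIP, NodePort, or LoadBalancer for each service;
# EXTERNAL-IP / PORT(S) show what's reachable from outside the cluster
kubectl get services
```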
Lastly, we need to be explicit about the port MongoDB exposes. Locally Docker was able to take care of it, but Kubernetes needs us to be super explicit about what's exposed and what's not.
\n\n\nThey used to let you do
\nkompose up
but now they don't. You have to convert the config and then apply the configs.
kompose convert --build local\n
Okay, now that you've done this, run
\nkubectl apply -f '*.yaml'\n
\n\nIf you see an error, make sure you have the quotes, they're needed, and make sure that your docker-compose.yml file doesn't have .yaml for its extension.
\n
To get a bird's eye view of everything running, run kubectl get all
to see everything happening. Critically, we want to see STATUS: Running on all three of our services. If you're seeing something like ErrImagePull, it means Kubernetes probably isn't finding your locally built images and you'll need to debug that.
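If you do need to debug, these are the commands I'd reach for first (the pod name below is a placeholder for whatever kompose and Kubernetes generated for you):

```bash
# list pods and their statuses
kubectl get pods

# the Events section at the bottom usually explains ErrImagePull / ImagePullBackOff
kubectl describe pod <some-pod-name>

# logs from the api deployment (assuming kompose named the deployment "api")
kubectl logs deployment/api
```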
Let's do some Kubernetes magic now. Run kubectl scale --replicas=5 deployment/api
and run kubectl get all
. Just like that, you have five instances of our Node.js app running and Kubernetes smartly routing traffic to each. If one of them becomes unhealthy, Kubernetes will automatically tear it down and spin up a new one. By setting up Kubernetes, you get a lot of cool stuff for free. If your computer is starting to warm up, feel free to run kubectl scale --replicas=1 deployment/api
to scale down. You can scale the database the same way too, but the load balancer won't scale with it; again, that's because Kubernetes expects the cloud provider to handle that for you.
Once you're done toying, run kubectl delete all --all
. This will tear down everything.
What's super fun is that kubectl is the same tool you'd use to control your production deployment. So everything you just learned would work against Azure, AWS, GCP, etc. All you have to do is change the context from minikube or docker-desktop to Azure, AWS, or GCP. I'm not going to do that but I'll drop the tutorials here so you can play around yourself. Do note these are often not free, and if you're not careful, Kubernetes can get expensive!
\nI like to tell people that containers are the "simple" (simple is a relative term here) part and Kubernetes is the "hard" (hard isn't relative; Kubernetes is really hard) part. So if this feels hard, it's because it is.
\nNOTE: Because Kubernetes is a long word, it's often abbreviated as k8s (k, then eight letters, then s.)
\nSo let's talk about use cases. Containers by themselves are useful for many, many use cases like production apps, machine learning, setting up environments, developer environments, and one-off experiments. Kubernetes builds on containers (read: you need to know containers to use Kubernetes.) Kubernetes is a container orchestration tool. It allows you to manage large, complicated clusters of containers across multiple different hosts. It's a complicated tool that solves complicated problems. As such, we are going to do a hello world so you can understand what it is, what it can do, and then leave you to explore more on your own.
\nSo let's go over a few fundamental concepts here.
\nHere's the sad part: doing this in the Windows subsystem for Linux is tough. If you're following along in Windows, I'd say just grab a coffee and watch how this works. It's not important that you actually do this. If you're comfortable in PowerShell, it works well from there or if you can connect to a true Linux VM, it'll work well from there too. Otherwise, just relax while I do this from macOS.
\nSo you're going to need at least one new CLI: kubectl
. kubectl
(see here for how to install) is the tool that allows you to control any Kubernetes cluster, be it local or in the cloud. It's the single unified CLI for managing Kubernetes. I definitely pronounce this as "cube cuddle" because it makes me happy.
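Once it's installed, a quick way to confirm kubectl is on your PATH (you don't need a cluster running yet for this) is something like:

```bash
# prints only the client version, so it works before any cluster exists
kubectl version --client
```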
After that, you need to make a choice between minikube
and using Docker Desktop's built in Kubernetes support. If it's all the same to you, I'd suggest using Docker Desktop's because it's easier to use.
minikube
(see here for how to install) is a development tool to get your Kubernetes cluster running on your local computer. You will only ever use this locally. You can have both installed, by the way. These will be called contexts. To switch between the two, you can kubectl config use-context minikube
or kubectl config use-context docker-desktop
. You can also shorten use-context
to use
.
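Here's what that context juggling looks like in practice (the context names depend on which of the two you installed; docker-desktop and minikube are the usual ones):

```bash
# list every context kubectl knows about; the current one is marked
kubectl config get-contexts

# switch to Docker Desktop's built-in cluster
kubectl config use-context docker-desktop

# or the shortened form mentioned above
kubectl config use minikube
```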
If you're using minikube, make sure you run minikube start
.
If you're using Docker Desktop, you will need to enable Kubernetes.
\n\nClick the gear in the top right of the Docker Desktop app.
\n\nNavigate to the Kubernetes menu and click enable Kubernetes. You will likely have to restart Docker.
\nDo a kubectl cluster-info
to make sure. You should see your control plane running.
Hello! And welcome to the Complete Intro to Containers! The objective of this course is to demystify what containers are, describe how they can be useful to you, and walk you through the steps of working with containers so that by the end of this course containers will be another tool available to you in your toolbox. Containers are only getting more important in the industry, and they're no longer just a tool for ops; they're a tool for developers. Everyone from designers to low-level system engineers will need to interact with containers on a regular basis. This will help you get ahead of the curve.
\nThis course is aimed at a developer demographic. While all the examples will be dealing with JavaScript applications, you don't necessarily need to be a JavaScript developer to grasp this course; the code will be incidental to the concepts being taught.
\nThis course assumes a very basic grasp of Linux and using the command line. You don't need to be a bash expert but this shouldn't be your first exposure to Linux or the command line. The class will be taught for both macOS and Windows users and will be using Ubuntu and Alpine Linux for the containers. This will also work well for Linux developers; the class won't have any additional instructions for Linux devs, but following the macOS steps should be 95% the same. If you are a Windows developer, please be using Windows 10. You'll need to either use WSL 2 or VirtualBox. See the set up instructions below.
\nIf you need to brush up on your Linux basics, I taught a course here that would be super helpful and I strongly suggest you take that first.
\nTo see all of the completed project files in a repo, refer here.
\nDo note that containers can take a lot of CPU and memory. If you have a modern-ish processor and 8GB, you will be fine. This could probably be done with some slow down on 4GB but anything lower would be pretty tough.
\nThis can also take a lot of bandwidth because we'll be downloading a lot of things. Be aware of that.
\nI write these courses and take care to avoid making mistakes. However, when teaching hours of material, mistakes are inevitable, both here in the grammar and in the course material itself. That said, I (and the wonderful team at Frontend Masters) am constantly correcting the mistakes so that those of you who come later get the best product possible. If you find an error, we'd love to fix it. The best way to do this is to open a pull request or file an issue on the GitHub repo. While I'm always happy to chat and give advice on social media, I can't be tech support for everyone. And if you file it on GitHub, those who come later can Google the same answer you got.
\nMy name is Brian Holt and I am the vice president of product at SQLite Cloud. I love teaching and teaching courses. I was previously on a path to become a university professor before pivoting into tech. Luckily Frontend Masters has afforded me an amazing opportunity to take the practical knowledge I have acquired over years of working in various roles in tech and share it with everyone I can.
\nMy current role is trying to make the simple, lightweight, blazing-fast database SQLite scale to enormous proportions to power every web and mobile app. Previous to my current role, I worked as a PM at Snowflake, Stripe, and Microsoft and as a staff engineer at LinkedIn, Netflix, Reddit, and a few other startups. I have had the privilege of having a varied career and to see tech from a lot of angles and I hope to share that with you.
\nI got really interested in containers when I was put in charge at Microsoft of developer experience for JavaScript developers working with Microsoft Azure. At first I was really intimidated by them: they were this big, scary operations tool that was unapproachable to me, a JavaScript developer. Once I started to dig into them, I realized that they were neither complicated nor scary. Containers are surprisingly simple pieces of technology and I promise once we work through them they will be far less scary.
\nPlease catch up with me on social media, would love to chat. I will warn you: I am awful at responding to direct messages!
\n\nAnd hey, if you could take a second and star the repo on GitHub I'd be super appreciative. It helps me reach more people and strokes my fragile ego.
\n","markdown":"\n## Course Objective\n\nHello! And welcome to the Complete Intro to Containers! The objective of this course is demystify what containers are, describe how they can be useful to you, and walk you through the steps of working with containers so that at the end of this course containers will be another tool available to you in your toolbox. Containers are just getting more important in the industry and now it's not just a tool for ops, it's a tool for developers. Everyone from the designers to the low level system engineers will need to interact with containers on a regular basis. This will help you get ahead of the curve.\n\n## Who Are You?\n\nThis course is aimed at a developer demographic. While all the examples will be dealing with JavaScript applications, you don't necessarily need to be a JavaScript developer to grasp this case; the code will be incidental to the concepts being taught.\n\nThis course assumes a very basic grasp of Linux and using the command line. You don't need to be a bash expert but this shouldn't be your first exposure to Linux or the command line. The class will be taught for both macOS and Windows users and will be using Ubuntu and Alpine Linux for the containers. This will also work well for Linux developers but the class won't have any additional instructions for Linux devs but following the macOS steps should be 95% the same. If you are a Windows developer, please be using Windows 10. You'll need to either use [WSL 2][wsl2] or VirtualBox. See the set up instructions below.\n\nIf you need to brush up on your Linux basics, [I taught a course here that would be super helpful][linux] and I strongly suggest you take that first\n\nTo see all of the completed project files in a repo, [refer here][project-files].\n\nDo note that containers can take a lot of CPU and memory. If you have a modern-ish processor and 8GB, you will be fine. This could probably be done with some slow down on 4GB but anything lower would be pretty tough.\n\nThis can also take a lot of bandwidth because we'll be downloading a lot of things. Be aware of that.\n\n## Where to File Issues\n\nI write these courses and take care to avoid making mistakes. However, when teaching hours of material, mistakes are inevitable, both here in the grammar and in the course with the material. However, I (and the wonderful team at Frontend Masters) are constantly correcting the mistakes so that those of you that come later get the best product possible. If you find an error, we'd love to fix it. The best way to do this is to [open a pull request or file an issue on the GitHub repo][issue]. While I'm always happy to chat and give advice on social media, I can't be tech support for everyone. And if you file it on GitHub, those who come later can Google the same answer you got.\n\n## Who Am I?\n\n![Brian teaching a course](/images/social-share-cover.jpg)\n\nMy name is Brian Holt and I am the vice president of product at [SQLite Cloud][sqlitecloud]. I love teaching and teaching courses. I was previously on a path to become a university professor before pivoting in tech. Luckily Frontend Masters has afforded me an amazing opportunity to take my practical knowledge I have acquired over the years of working in various roles of tech to share that knowledge with everyone I can.\n\nMy current role is trying to make the simple, lightweight, blazing-fast database SQLite scale to enormous proportions to power every web and mobile app. 
Previous to my current role, I worked as a PM at Snowflake, Stripe, and Microsoft and as a staff engineer at LinkedIn, Netflix, Reddit, and a few other startups. I have had the privilege of having a varied career and to see tech from a lot of angles and I hope to share that with you.\n\nI got really interested in containers when I was put in charge at Microsoft of developer experience for JavaScript developers working with Microsoft Azure. At first I was really intimidated by them: they were this big, scary operations tool that was unapproachable to me, a JavaScript developer. Once I started to dig into them, I realized that they were neither complicated nor scary. Containers are surprisingly simple pieces of technology and I promise once we work through them they will be far less scary.\n\nPlease catch up with me on social media, would love to chat. I will warn you: I am awful at responding to direct messages!\n\n- [Twitter][twitter]\n- [GitHub][github]\n- [LinkedIn][linkedin]\n\nAnd hey, if you could take a second and [star the repo on GitHub][gh] I'd be super appreciative. It helps me reach more people and strokes my fragile ego.\n\n[gh]: https://github.com/btholt/complete-intro-to-containers-v2\n[frontend-masters]: https://frontendmasters.com/teachers/brian-holt/\n[fehh]: http://frontendhappyhour.com/\n[fem]: https://frontendmasters.com/\n[twitter]: https://twitter.com/holtbt\n[github]: https://github.com/btholt\n[linkedin]: https://www.linkedin.com/in/btholt/\n[course]: https://frontendmasters.com/courses/complete-intro-containers-v2/\n[issue]: https://github.com/btholt/complete-intro-to-containers-v2/issues\n[project-files]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2\n[linux]: https://frontendmasters.com/courses/linux-command-line/\n[sqlitecloud]: https://sqlitecloud.io/\n[wsl2]: https://learn.microsoft.com/en-us/windows/wsl/install\n","slug":"introduction","title":"Introduction","section":"Welcome","icon":"info-circle","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/01-welcome/A-introduction.md","nextSlug":"/lessons/welcome/set-up","prevSlug":null}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/welcome/set-up.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/welcome/set-up.json new file mode 100644 index 0000000..03098ae --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/welcome/set-up.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Learn about Docker Desktop, a convenient desktop app GUI to control Docker on your computer. Follow installation instructions for Windows, macOS, and Linux provided in the document. Understand system requirements, internet usage, and tools used in the course.","keywords":["Docker Desktop","installation instructions","system requirements","internet usage","tools FAQ"]},"html":"Docker Desktop is a desktop app GUI that allows you to control Docker on your computer. You can definitely use Docker and containers without it but it's just a convenience to be able to turns things on and off with an app instead of trying to communicate with the daemon via their client. Suffice to say, at least for this course, please use Docker Desktop. It's free for individuals and small companies.
\nYou will need to set up Docker Desktop if you haven't already.
\nDocker states on its website that it requires 4GB of RAM to run okay. I haven't tried it but that seems like it should be enough. 8GB would really put you in a comfortable spot.
\nFor Windows developers, you'll either need to be on Windows 10 or 11. It doesn't matter what version (Home, Pro, Education, etc.) It used to matter but now Windows allows any version of Windows 10+ to use Docker. Please be sure to follow all the instructions carefully as you may have to do some stuff like enable virtualization and turn on WSL which aren't on by default. This course does not work on Windows 7 or 8 (or 9, lol.) You will see on the Windows page a bunch of information about what version of Windows you need for Windows containers ā ignore that. We're not doing any Windows containers today, just Linux.
\nFor Linux devs, they have instructions for Ubuntu, Debian, RHEL, and Fedora. They also list experimental support for Arch. If you're using something different than those, you're on your own. Generally if you're on Linux I'm going to assume you can translate my macOS instructions into Linux.
\nThis course also assumes you are using an x64 processor or an Apple Silicon processor. This class is untested on 32 bit processors, other ARM, RISC, etc. processors.
\nThis class will use a fair amount of bandwidth as containers can be quite large. Docker does a decent job of caching so once you've downloaded a container once it will cache its layers so you don't have to install it again. If you're on metered or slower Internet, be aware of that.
\nAlso be aware that Docker can eat up your disk space pretty quickly. I have barely used Docker on my new computer and already it's using 2GB of storage with various images. Once Docker is running, run docker image ls
to see what you have locally and type docker image rm <image name>
to remove any that you don't want sticking around if you need to free up space. You can also do this from the Docker Desktop GUI.
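As a concrete example of that cleanup (the image name is just a placeholder for whatever shows up in your own list):

```bash
# see what images are taking up space locally
docker image ls

# remove one you no longer need (placeholder name)
docker image rm my-old-image

# or reclaim space in bulk: removes stopped containers, unused networks, and dangling images
docker system prune
```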
The short answer is no.
\nThe slightly longer answer is noooo.
\nThe longer answer is that it's likely most of the course would work on something like podman or nerdctl but I'm not testing any of it so I'm sure you'll run into inconsistencies and I won't be able to help you with it. They're very valid and useful pieces of technology and you should try them but for this course let's stick to Docker.
\n","markdown":"\n## Docker Desktop\n\nDocker Desktop is a desktop app GUI that allows you to control Docker on your computer. You can definitely use Docker and containers without it but it's just a convenience to be able to turns things on and off with an app instead of trying to communicate with the daemon via their client. Suffice to say, at least for this course, please use Docker Desktop. It's free for individuals and small companies.\n\nYou will need to set up Docker Desktop if you haven't already.\n\n- [Installation instructions for Microsoft Windows][windows] (if you're unsure, I suggest doing the WSL2 installation)\n- [Installation instructions for Apple macOS][macos] (make sure to choose if you have an Intel or an Apple chip in your computer)\n- [Installation instructions for Linux][linux]\n\nDocker states on its website that it requires 4GB of RAM to run okay. I haven't tried it but that seems like it should be enough. 8GB would really put you in a comfortable spot.\n\nFor Windows developers, you'll either need to be on Windows 10 or 11. It doesn't matter what version (Home, Pro, Education, etc.) It used to matter but now Windows allows any version of Windows 10+ to use Docker. Please be sure to follow all the instructions carefully as you may have to do some stuff like enable virtualization and turn on WSL which aren't on by default. This course does not work on Windows 7 or 8 (or 9, lol.) You will see on the Windows page a bunch of information about what version of Windows you need for Windows containers ā ignore that. We're not doing any Windows containers today, just Linux.\n\nFor Linux devs, they have instructions for Ubuntu, Debian, RHEL, and Fedora. They also list experimental support for Arch. If you're using something different than those, you're on your own. Generally if you're on Linux I'm going to assume you can translate my macOS instructions into Linux.\n\nThis course also assumes you are using an x64 processor or an Apple Silicon processor. This class is untested on 32 bit processors, other ARM, RISC, etc. processors. \n\n## Internet and Storage\n\nThis class will use a fair amount of bandwidth as containers can be quite large. Docker does a decent job of caching so once you've downloaded a container once it will cache its layers so you don't have to install it again. If you're on metered or slower Internet, be aware of that.\n\nAlso be aware the Docker can eat up your disk space pretty quickly. I have barely used Docker on my new computer and already it's using 2GB of storage with various images. Once Docker is running, run `docker image ls` to see what you have locally and type `docker image rmThat's it Congratulations on completing the Complete Intro to Containers, version 2. On one hand, I feel like this course is pretty dense and a lot of systems-level stuff is thrown at you in a short amount of time. On the other hand, I feel like all of this stuff is actually more approachable than it seems at first impression. I started doing this stuff when I worked at Microsoft because I wanted to be able to sound intelligent when I spoke to the smart people making Azure work and it turns out I really like it. Every since I've always been in and around cloud stuff and it really started with my love for mucking around with containers.
\nLet's review what we talked about:
\nThat's a lot of stuff! Congrats, you are now ahead of the curve on containers and this will serve you your whole career. Containers aren't going anywhere; they're just becoming a bigger part of our workflows. Every day you interact with dozens if not hundreds of containers in some way. I see that only increasing as everything becomes a deploy target, from fridges to watches to billboards.
\nThanks, and as always, please let me know how you liked the course!
\n❤️ Brian Holt
\n","markdown":"That's it Congratulations on completing the Complete Intro to Containers, version 2. On one hand, I feel like this course is pretty dense and a lot of systems-level stuff is thrown at you in a short amount of time. On the other hand, I feel like all of this stuff is actually more approachable than it seems at first impression. I started doing this stuff when I worked at Microsoft because I wanted to be able to sound intelligent when I spoke to the smart people making Azure work and it turns out I really like it. Every since I've always been in and around cloud stuff and it really started with my love for mucking around with containers.\n\nLet's review what we talked about:\n\n- What containers are\n- chroot and code jails\n- Linux namespaces and how to limit processes on the same OS\n- cgroups and how to limit resources to processes\n- What an image is\n- How to build a JavaScript project in Docker\n- Docker Desktop and dev tools to use with Docker\n- Building Docker images\n- Dockerfiles in depth\n- How to build containers both for production performance and so they rebuild quickly\n- How to make smaller containers\n- Alpine and alternative Linux distros\n- How to do multi stage builds\n- Distroless and other alternatives to Alpine\n- A project on building your own static asset server\n- Bind mounts and volumes\n- Dev containers\n- Networking in Docker\n- Docker Compose and multi container setups\n- Kubernetes and Kompose\n- What other tools are out there\n\nThat's a lot of stuff! Congrats, you are now _ahead_ of the curve on containers and this will serve you whole career. Containers aren't going anywhere; they're just becoming a bigger part of our workflows. Every day you interact with dozens if not hundreds of containers in some way. I see that only increasing as everything becomes a deploy target, from fridges to watches to billboards.\n\nThanks, and as always, please let me know how you liked the course!\n\nā¤ļø [Brian Holt][brian]\n\n[brian]: https://www.twitter.com/holtbt\n","slug":"conclusion","title":"Conclusion","section":"Wrap Up","icon":"graduation-cap","filePath":"/home/runner/work/complete-intro-to-containers-v2/complete-intro-to-containers-v2/lessons/08-wrap-up/B-conclusion.md","nextSlug":null,"prevSlug":"/lessons/wrap-up/docker-alternatives"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/wrap-up/docker-alternatives.json b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/wrap-up/docker-alternatives.json new file mode 100644 index 0000000..1f740d7 --- /dev/null +++ b/_next/data/CivD-M4DIwKkMNDuPXONW/lessons/wrap-up/docker-alternatives.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":"Explore alternatives to Docker for container building, runtime tools, runtimes, orchestrators, and desktop apps. Learn about tools like Podman, Buildah, containerd, gVisor, OpenShift, Nomad, and more as options in the container ecosystem.","keywords":["Docker alternatives","container tools","container runtimes","orchestration tools","Podman","containerd","Kubernetes alternatives"]},"html":"So far we have only talked about Docker and there's a pretty good reason for that: for personal use or developer use, Docker is pretty much the indisputable champion. It has all the developer experience bells and whistles, all the mindshare of developers everywhere, and even all the other tools we are about to talk about like to tout their Docker compatability. 
However, it is good to keep in mind that Docker is a for-profit company and thus they are trying to align your incentives to theirs and vice versa. It's good to know what else exists out there.
\nI don't have a lot of experience with any of these as I've exclusively used Docker my whole career, but I wanted to get these names out in front of you so you recognize what they are.
\nThis is what would replace docker build
. What tools out there exist for building containers?
This is what would replace docker run
. This is the toolset that orchestrates the runtime that actually runs the container.
This is the actual code executing your container.
\nThese are alternatives to Kubernetes (and somewhat Docker Compose)
\nAlternatives to the Docker Desktop app.
\ne||125Complete Intro to Containers
Complete Intro to Containers
v2
Table of Contents
Welcome
Crafting Containers by Hand
Docker Features
Multi Container Projects