diff --git a/images/doppler-kubernetes-operator/README.md b/images/doppler-kubernetes-operator/README.md
new file mode 100644
index 0000000000..798e311db0
--- /dev/null
+++ b/images/doppler-kubernetes-operator/README.md
@@ -0,0 +1,46 @@
+
+# images/doppler-kubernetes-operator
+| | |
+| - | - |
+| **OCI Reference** | `cgr.dev/chainguard/images/doppler-kubernetes-operator` |
+
+
+* [View Image in Chainguard Academy](https://edu.chainguard.dev/chainguard/chainguard-images/reference/images/doppler-kubernetes-operator/overview/)
+* [View Image Catalog](https://console.enforce.dev/images/catalog) for a full list of available tags.
+* [Contact Chainguard](https://www.chainguard.dev/chainguard-images) for enterprise support, SLAs, and access to older tags.
+
+---
+
+
+
+Automatically sync secrets from Doppler to Kubernetes and auto-reload deployments when secrets change.
+
+
+
+## Download this Image
+The image is available on `cgr.dev`:
+
+```
+docker pull cgr.dev/chainguard/images/doppler-kubernetes-operator:latest
+```
+
+
+
+doppler-kubernetes-operator is a Kubernetes operator that can be deployed with Helm. Refer to the [upstream repository's documentation](https://github.com/DopplerHQ/kubernetes-operator) for how to get started with doppler-kubernetes-operator.
+
+```shell
+helm repo add doppler https://helm.doppler.com
+helm install doppler-kubernetes-operator doppler/doppler-kubernetes-operator
+
+helm repo update
+helm pull doppler/doppler-kubernetes-operator --untar
+kubectl apply -f doppler-kubernetes-operator/crds/all.yaml
+
+helm upgrade doppler-kubernetes-operator doppler/doppler-kubernetes-operator \
+  --set image.repository=cgr.dev/chainguard/doppler-kubernetes-operator \
+  --set image.tag=latest
+```
+
+See the [project documentation](https://github.com/DopplerHQ/kubernetes-operator/blob/main/README.md) for more details.
+
diff --git a/images/doppler-kubernetes-operator/TESTING.md b/images/doppler-kubernetes-operator/TESTING.md
new file mode 100644
index 0000000000..6892008434
--- /dev/null
+++ b/images/doppler-kubernetes-operator/TESTING.md
@@ -0,0 +1,131 @@
+# TESTING.md
+
+## Create a local cluster with kind
+
+Using kind, you can create a local cluster:
+
+```shell
+kind create cluster
+```
+
+## Using Helm
+You can install the latest Helm chart with:
+```shell
+helm repo add doppler https://helm.doppler.com
+helm install --generate-name doppler/doppler-kubernetes-operator
+```
+
+You can also upgrade to the latest Helm chart. Note that the CRD is not automatically updated by a simple `helm upgrade`, so be sure to follow this process:
+
+```shell
+helm repo update
+helm pull doppler/doppler-kubernetes-operator --untar
+kubectl apply -f doppler-kubernetes-operator/crds/all.yaml
+```
+
+## Doppler Service Token
+
+Generate a Doppler Service Token and use it in this command to create your Doppler token secret:
+
+```shell
+kubectl create secret generic doppler-token-secret \
+    --namespace doppler-operator-system \
+    --from-literal=serviceToken=dp.st.dev.XXXX
+```
+
+If you have the Doppler CLI installed, you can generate a Doppler Service Token from the CLI and create the Doppler token secret in one step:
+
+```shell
+kubectl create secret generic doppler-token-secret \
+    --namespace doppler-operator-system \
+    --from-literal=serviceToken=$(doppler configs tokens create doppler-kubernetes-operator --plain)
+```
+
+Note: This command generates a Personal Token, which has the same access permissions as your user. If you're on the Developer plan or just doing a quick test to see how this works, that should be fine. However, if you're on the Team plan or higher and this is for an actual deployment, we recommend using a Service Account token instead.
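+
+If you already have a Service Account token, the Kubernetes secret itself can be created the same way; the token value below is only a placeholder:
+
+```shell
+# Replace the placeholder with your own Service Account token
+kubectl create secret generic doppler-token-secret \
+    --namespace doppler-operator-system \
+    --from-literal=serviceToken=<YOUR_SERVICE_ACCOUNT_TOKEN>
+```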
+
+## DopplerSecret
+Next, we'll create a DopplerSecret that references your Doppler token secret and defines the location of the managed secret.
+
+```yaml
+# dopplersecret.yaml
+apiVersion: secrets.doppler.com/v1alpha1
+kind: DopplerSecret
+metadata:
+  name: dopplersecret-test
+  namespace: doppler-operator-system
+spec:
+  tokenSecret:
+    name: doppler-token-secret
+  managedSecret:
+    name: doppler-test-secret
+    namespace: default
+    type: Opaque
+```
+
+To add the DopplerSecret to your cluster:
+
+```shell
+kubectl apply -f dopplersecret.yaml
+```
+
+Check that the associated Kubernetes secret has been created:
+
+```shell
+kubectl describe secrets --selector=secrets.doppler.com/subtype=dopplerSecret
+```
+
+## Deployment
+
+The `envFrom` field will populate a container's environment variables using the secret's key-value pairs:
+
+```yaml
+# deployment-envfrom.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: doppler-test-deployment-envfrom
+  annotations:
+    secrets.doppler.com/reload: 'true'
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: doppler-test
+  template:
+    metadata:
+      labels:
+        app: doppler-test
+    spec:
+      containers:
+        - name: doppler-test
+          image: alpine
+          command:
+            - /bin/sh
+            - -c
+            # Print all non-Kubernetes environment variables
+            - apk add --no-cache tini > /dev/null 2>&1 &&
+              echo "### This is a simple deployment running with this env:" &&
+              printenv | grep -v KUBERNETES_ &&
+              tini -s tail -f /dev/null
+          imagePullPolicy: Always
+          envFrom:
+            - secretRef:
+                name: doppler-test-secret # Kubernetes secret name
+          resources:
+            requests:
+              memory: '250Mi'
+              cpu: '250m'
+            limits:
+              memory: '500Mi'
+              cpu: '500m'
+```
+
+```shell
+kubectl apply -f deployment-envfrom.yaml
+```
+
+Once the Deployment has completed, you can view the logs of the test container:
+
+```shell
+kubectl logs -lapp=doppler-test --tail=-1
+```
diff --git a/images/doppler-kubernetes-operator/config/main.tf b/images/doppler-kubernetes-operator/config/main.tf
new file mode 100644
index 0000000000..231f90c9f2
--- /dev/null
+++ b/images/doppler-kubernetes-operator/config/main.tf
@@ -0,0 +1,19 @@
+terraform {
+  required_providers {
+    apko = { source = "chainguard-dev/apko" }
+  }
+}
+
+variable "extra_packages" {
+  description = "The additional packages to install"
+  default     = ["doppler-kubernetes-operator"]
+}
+
+data "apko_config" "this" {
+  config_contents = file("${path.module}/template.apko.yaml")
+  extra_packages  = var.extra_packages
+}
+
+output "config" {
+  value = jsonencode(data.apko_config.this.config)
+}
diff --git a/images/doppler-kubernetes-operator/config/template.apko.yaml b/images/doppler-kubernetes-operator/config/template.apko.yaml
new file mode 100644
index 0000000000..66af31acae
--- /dev/null
+++ b/images/doppler-kubernetes-operator/config/template.apko.yaml
@@ -0,0 +1,12 @@
+accounts:
+  groups:
+    - groupname: nonroot
+      gid: 65532
+  users:
+    - username: nonroot
+      uid: 65532
+      gid: 65532
+  run-as: 65532
+
+entrypoint:
+  command: manager
diff --git a/images/doppler-kubernetes-operator/main.tf b/images/doppler-kubernetes-operator/main.tf
new file mode 100644
index 0000000000..d688236519
--- /dev/null
+++ b/images/doppler-kubernetes-operator/main.tf
@@ -0,0 +1,39 @@
+terraform {
+  required_providers {
+    oci = { source = "chainguard-dev/oci" }
+  }
+}
+
+variable 
"target_repository" { + description = "The docker repo into which the image and attestations should be published." +} + +module "config" { source = "./config" } + +module "doppler-kubernetes-operator" { + source = "../../tflib/publisher" + name = basename(path.module) + target_repository = var.target_repository + config = module.config.config + + build-dev = true + +} + +module "test" { + source = "./tests" + digest = module.doppler-kubernetes-operator.image_ref +} + +resource "oci_tag" "latest" { + depends_on = [module.test] + digest_ref = module.doppler-kubernetes-operator.image_ref + tag = "latest" +} + +resource "oci_tag" "latest-dev" { + depends_on = [module.test] + digest_ref = module.doppler-kubernetes-operator.dev_ref + tag = "latest-dev" +} + diff --git a/images/doppler-kubernetes-operator/metadata.yaml b/images/doppler-kubernetes-operator/metadata.yaml new file mode 100644 index 0000000000..a60734f5a2 --- /dev/null +++ b/images/doppler-kubernetes-operator/metadata.yaml @@ -0,0 +1,10 @@ +name: images/doppler-kubernetes-operator +image: cgr.dev/chainguard/images/doppler-kubernetes-operator +logo: https://storage.googleapis.com/chainguard-academy/logos/images/doppler-kubernetes-operator.svg +endoflife: "" +console_summary: "" +short_description: Automatically sync secrets from Doppler to Kubernetes and auto-reload deployments when secrets change. +compatibility_notes: "" +readme_file: README.md +upstream_url: https://github.com/DopplerHQ/kubernetes-operator +keywords: [] diff --git a/images/doppler-kubernetes-operator/tests/main.tf b/images/doppler-kubernetes-operator/tests/main.tf new file mode 100644 index 0000000000..02defe2867 --- /dev/null +++ b/images/doppler-kubernetes-operator/tests/main.tf @@ -0,0 +1,61 @@ +terraform { + required_providers { + oci = { source = "chainguard-dev/oci" } + helm = { source = "hashicorp/helm" } + random = { source = "hashicorp/random" } + } +} + +variable "digest" { + description = "The image digest to run tests over." 
+} + +data "oci_string" "ref" { input = var.digest } + +resource "random_id" "id" { byte_length = 4 } + +resource "helm_release" "doppler-kubernetes-operator" { + name = "doppler-kubernetes-operator-${random_id.id.hex}" + namespace = "doppler-kubernetes-operator" + repository = "https://helm.doppler.com" + chart = "doppler-kubernetes-operator" + create_namespace = true + + values = [ + jsonencode({ + image = { + registry = data.oci_string.ref.registry + repository = data.oci_string.ref.repo + tag = data.oci_string.ref.pseudo_tag + } + }), + ] +} + +data "oci_exec_test" "smoke" { + digest = var.digest + script = "./smoke_test.sh" + working_dir = path.module + env = [ + { + name = "RELEASE_ID" + value = random_id.id.hex + }, + { + name = "RELEASE_NAME" + value = helm_release.doppler-kubernetes-operator.name + }, + { + name = "RELEASE_NAMESPACE" + value = helm_release.doppler-kubernetes-operator.namespace + } + ] + depends_on = [helm_release.doppler-kubernetes-operator] +} + +module "helm_cleanup" { + source = "../../../tflib/helm-cleanup" + name = helm_release.doppler-kubernetes-operator.id + namespace = helm_release.doppler-kubernetes-operator.namespace + depends_on = [data.oci_exec_test.smoke] +} diff --git a/images/doppler-kubernetes-operator/tests/smoke_test.sh b/images/doppler-kubernetes-operator/tests/smoke_test.sh new file mode 100755 index 0000000000..2c94f50c80 --- /dev/null +++ b/images/doppler-kubernetes-operator/tests/smoke_test.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +set -o errexit -o nounset -o errtrace -o pipefail -x + +sleep 5 + +DELAY=5 +NAMESPACE="doppler-operator-system" +CONTROLLER_POD=$(kubectl get pods -n "$NAMESPACE" -o=jsonpath='{.items[0].metadata.name}') +SECRET_NAME="doppler-token-secret" +DUMMY_SECRET_NAME="my-doppler-secret" +TIMER=0 +TIMEOUT=60 + +echo "Step 1: Create a dummy secret in Kubernetes" +kubectl create secret generic "$SECRET_NAME" --from-literal=token="$DUMMY_SECRET_NAME" -n "$NAMESPACE" + +echo "Step 2: Checking the operator status..." +kubectl wait --for=condition=Ready pod -n "$NAMESPACE" "$CONTROLLER_POD" --timeout="$TIMEOUT"s + +echo "Step 3: Creating the DopplerHQ secret resource..." +cat </dev/null) + if [ -n "$SECRET" ]; then + echo "¡The secret $SECRET_NAME was found in the namespace $NAMESPACE!" + exit 0 + fi + + # Wait the specified delay and then increment the timer + sleep $DELAY + TIMER=$(($TIMER + $DELAY)) +done + +echo "Could not create secret $SECRET_NAME within $TIMEOUT seconds!" +exit 1 diff --git a/images/pytorch-cuda12/README.md b/images/pytorch-cuda12/README.md index a3e714a33d..cff9f1569f 100644 --- a/images/pytorch-cuda12/README.md +++ b/images/pytorch-cuda12/README.md @@ -60,6 +60,7 @@ As a quick intro, we will use pytorch to create a very simple deep learning mode To run this script, ```bash + docker run --rm -it -v /home/srishihegde/quick.py:/tmp/model_builder.py --gpus all cgr.dev/chainguard/pytorch-cuda12:latest -c "python /tmp/model_builder.py" ``` A quickstart tutorial as outlined [here](https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html) can also be run using the tests/quickstart.py script similar to the above run diff --git a/images/pytorch-cuda12/config/main.tf b/images/pytorch-cuda12/config/main.tf index a31ecda3a9..cdd0da1c41 100644 --- a/images/pytorch-cuda12/config/main.tf +++ b/images/pytorch-cuda12/config/main.tf @@ -7,6 +7,7 @@ terraform { variable "extra_packages" { description = "Additional packages to install." 
type = list(string) + # torchvision is currently built on top of torch and should include all the packages we expect from it default = ["torchvision-cuda12"] } diff --git a/images/pytorch-cuda12/tests/torch_optim.py b/images/pytorch-cuda12/tests/torch_optim.py new file mode 100644 index 0000000000..a79c57fbde --- /dev/null +++ b/images/pytorch-cuda12/tests/torch_optim.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +import torch +import math + + +# Create Tensors to hold input and outputs. +x = torch.linspace(-math.pi, math.pi, 2000) +y = torch.sin(x) + +# Prepare the input tensor (x, x^2, x^3). +p = torch.tensor([1, 2, 3]) +xx = x.unsqueeze(-1).pow(p) + +# Use the nn package to define our model and loss function. +model = torch.nn.Sequential( + torch.nn.Linear(3, 1), + torch.nn.Flatten(0, 1) +) +loss_fn = torch.nn.MSELoss(reduction='sum') + +# Use the optim package to define an Optimizer that will update the weights of +# the model for us. Here we will use RMSprop; the optim package contains many other +# optimization algorithms. The first argument to the RMSprop constructor tells the +# optimizer which Tensors it should update. +learning_rate = 1e-3 +optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate) +for t in range(2000): + # Forward pass: compute predicted y by passing x to the model. + y_pred = model(xx) + + # Compute and print loss. + loss = loss_fn(y_pred, y) + if t % 100 == 99: + print(t, loss.item()) + + # Before the backward pass, use the optimizer object to zero all of the + # gradients for the variables it will update (which are the learnable + # weights of the model). This is because by default, gradients are + # accumulated in buffers( i.e, not overwritten) whenever .backward() + # is called. Checkout docs of torch.autograd.backward for more details. + optimizer.zero_grad() + + # Backward pass: compute gradient of the loss with respect to model + # parameters + loss.backward() + + # Calling the step function on an Optimizer makes an update to its + # parameters + optimizer.step() + + +linear_layer = model[0] +print(f'Result: y = {linear_layer.bias.item()} + {linear_layer.weight[:, 0].item()} x + {linear_layer.weight[:, 1].item()} x^2 + {linear_layer.weight[:, 2].item()} x^3') diff --git a/main.tf b/main.tf index d78d2e84f2..1e12db419f 100644 --- a/main.tf +++ b/main.tf @@ -347,6 +347,11 @@ module "dive" { target_repository = "${var.target_repository}/dive" } +module "doppler-kubernetes-operator" { + source = "./images/doppler-kubernetes-operator" + target_repository = "${var.target_repository}/doppler-kubernetes-operator" +} + module "docker-selenium" { source = "./images/docker-selenium" target_repository = "${var.target_repository}/docker-selenium"
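
For local verification, the new torch_optim.py test can be exercised against the image in the same way as the quick-start example in the pytorch-cuda12 README; the host path below is a placeholder and assumes a checkout of this repository:

```bash
# Mount the new test into the container and run it on the GPU (the host path is an assumption)
docker run --rm -it \
  -v "$PWD/images/pytorch-cuda12/tests/torch_optim.py:/tmp/torch_optim.py" \
  --gpus all \
  cgr.dev/chainguard/pytorch-cuda12:latest \
  -c "python /tmp/torch_optim.py"
```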