From f313c9de498e358568448ce952a97c96c75f1bb5 Mon Sep 17 00:00:00 2001
From: Mariusz Sabath
Date: Mon, 13 Sep 2021 18:13:57 -0400
Subject: [PATCH] updates to support OIDC demo for VAULT

Signed-off-by: Mariusz Sabath
---
 docs/spire-oidc-tutorial.md         | 11 +++++----
 docs/spire-oidc-vault.md            | 35 ++++++++++++++++++-----------
 examples/spire/vault-oidc.sh        |  2 +-
 utils/install-open-shift-spire.sh   |  2 +-
 utils/install-open-shift-tornjak.sh | 16 +++++++------
 5 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/docs/spire-oidc-tutorial.md b/docs/spire-oidc-tutorial.md
index 13e307db..e0451bff 100644
--- a/docs/spire-oidc-tutorial.md
+++ b/docs/spire-oidc-tutorial.md
@@ -9,13 +9,9 @@ In this example we will deploy Tornjak and SPIRE server on OpenShift in IBM Cloud
 
 Follow the documentation to deploy [Tornjak on Openshift](./spire-on-openshift.md#deploy-on-openshift]) with exception of enabling the `--oidc` flag:
 
-```console
-# check if rootCA is present:
-ls sample-keys/CA
-rootCA.crt rootCA.key rootCA.srl
-
+```
 # install:
-utils/install-open-shift-tornjak.sh -c -t -p --oidc
+utils/install-open-shift-tornjak.sh -c $CLUSTER_NAME -t $TRUST_DOMAIN -p $PROJECT_NAME --oidc
 ```
 
 for example:
@@ -65,6 +61,9 @@ This output confirms that the OIDC endpoint is accessible and responds with valid
 Let's install the [SPIRE Agents](./spire-on-openshift.md#step-2-installing-spire-agents-on-openshift):
 
 ```
+oc new-project spire --description="My TSI Spire Agent project on OpenShift"
+kubectl get configmap spire-bundle -n tornjak -o yaml | sed "s/namespace: tornjak/namespace: spire/" | kubectl apply -n spire -f -
+
 export SPIRE_SERVER=spire-server-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud
 
 utils/install-open-shift-spire.sh -c space-x.01 -s $SPIRE_SERVER -t openshift.space-x.com
diff --git a/docs/spire-oidc-vault.md b/docs/spire-oidc-vault.md
index 222a8b99..a3900112 100644
--- a/docs/spire-oidc-vault.md
+++ b/docs/spire-oidc-vault.md
@@ -28,6 +28,7 @@ vault login -no-print "${ROOT_TOKEN}"
 ## Configure a Vault instace:
 
 We have a script [examples/spire/vault-oidc.sh](../examples/spire/vault-oidc.sh) that configures the Vault instance with the required demo configuration, but before we run it, let's first explain what happens.
+**All the commands listed here are already in the script, so don't run them manually!**
 
 First few commands enable the Secret Engine and setup Vault OIDC Federation with our instance of SPIRE.
 
@@ -43,7 +44,7 @@ vault auth enable jwt
 Set up our OIDC Discovery URL, using the values created in [OIDC tutorial setup](./spire-oidc-tutorial.md) and using defalt role **dev**:
 
 ```
-vault write auth/jwt/config oidc_discovery_url=$SPIRE_SERVER default_role=“dev”
+vault write auth/jwt/config oidc_discovery_url=$OIDC_URL default_role="dev"
 ```
 
 Define a policy `my-dev-policy` that gives `read` access to `my-super-secret`:
@@ -70,7 +71,7 @@ cat > role.json <
 
 ```
+Here is our example:
+```console
+examples/spire/vault-oidc.sh https://oidc-tornjak.space-x01-9d995c4a8c7c5f281ce13d546a94-0000.us-east.containers.appdomain.cloud $ROOT_TOKEN $VAULT_ADDR
+```
+
 
-Now, create a test secret value:
+Once the script successfully completes,
+create a test secret value:
 ```console
 vault kv put secret/my-super-secret test=123
@@ -105,20 +112,22 @@ vault kv put secret/my-super-secret test=123
 
 For testing this setup we are going to use [examples/spire/mars-spaceX.yaml](examples/spire/mars-spaceX.yaml) deployment.
-Based on the following annotation:
+Make sure the pod labels match the labels in the Workload Registrar configuration:
 
 ```yaml
-metadata:
-  annotations:
-    spire-workload-id: eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b
+template:
+  metadata:
+    labels:
+      identity_template: "true"
+      app: mars-mission
 ```
 
-this container will get the following identity:
+this container will get an identity that might look like this:
 
-`eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b`
+`spiffe://openshift.space-x.com/region/us-east/cluster_name/space-x.01/ns/default/sa/elon-musk/pod_name/mars-mission-7874fd667c-rchk5`
 
-Let's create a container and get inside:
+Let's create a pod and get inside the container:
 
 ```console
 kubectl -n default create -f examples/spire/mars-spaceX.yaml
@@ -148,7 +157,7 @@ The JWT token is the long string that follows the **token**:
 ```console
 bin/spire-agent api fetch jwt -audience vault -socketPath /run/spire/sockets/agent.sock
 
-token(spiffe://openshift.space-x.com/eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b):
+token(spiffe://openshift.space-x.com/region/us-east/cluster_name/space-x.01/ns/default/sa/elon-musk/pod_name/mars-mission-7874fd667c-rchk5):
 eyJhbGciOiJSUzI1NiIs....cy46fb465a
 ```
 
@@ -161,7 +170,7 @@ Export also `eurole` as **ROLE** and actual **VAULT_ADDR**
 
 ```console
 export ROLE=eurole
-export VAULT_ADDR=http://tsi-kube01-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud
+export VAULT_ADDR=http://tsi-vault-tsi-vault.space-x01-9d995c4a8c7c5f281ce13d546a94-0000.us-east.containers.appdomain.cloud
 
 Now let's try to login to Vault using the JWT token:
diff --git a/examples/spire/vault-oidc.sh b/examples/spire/vault-oidc.sh
index 11cde31d..71b07dc9 100755
--- a/examples/spire/vault-oidc.sh
+++ b/examples/spire/vault-oidc.sh
@@ -73,7 +73,7 @@ EOF
     "bound_audiences": "vault",
     "bound_claims_type": "glob",
     "bound_claims": {
-      "sub":"spiffe://openshift.space-x.com/eu-*/*/*/elon-musk/mars-mission-main/*"
+      "sub":"spiffe://openshift.space-x.com/region/*/cluster_name/*/ns/*/sa/elon-musk/pod_name/mars-mission-*"
     },
     "token_ttl": "24h",
     "token_policies": "my-dev-policy"
diff --git a/utils/install-open-shift-spire.sh b/utils/install-open-shift-spire.sh
index 7e42a5ee..48d713ae 100755
--- a/utils/install-open-shift-spire.sh
+++ b/utils/install-open-shift-spire.sh
@@ -112,7 +112,7 @@ installSpireAgent(){
   oc get projects | grep "${PROJECT}"
   if [ "$?" != "0" ]; then
!= "0" ]; then echo "Project $PROJECT must be created first" - echo "oc new-project $PROJECT --description=\"My TSI Spire Agent project on OpenShift\" 2> /dev/null" + echo "oc new-project $PROJECT --description=\"My TSI Spire Agent project on OpenShift\" " exit 1 fi diff --git a/utils/install-open-shift-tornjak.sh b/utils/install-open-shift-tornjak.sh index dc0b8382..76ee31fb 100755 --- a/utils/install-open-shift-tornjak.sh +++ b/utils/install-open-shift-tornjak.sh @@ -23,7 +23,7 @@ Syntax: ${0} -c -t -p --oidc Where: -c - name of the OpenShift cluster (required) -t - the trust root of SPIFFE identity provider, default: spiretest.com (optional) - -p - OpenShift project [namespace] to install the Server, default: spire-server (optional) + -p - OpenShift project [namespace] to install the Server, default: tornjak (optional) --oidc - execute OIDC installation (optional) --clean - performs removal of project (allows additional parameters i.e. -p|--project). HELPMEHELPME @@ -36,12 +36,13 @@ cleanup() { oc delete ClusterRole spire-server-role 2>/dev/null oc delete ClusterRoleBinding spire-server-binding 2>/dev/null + oc delete statefulset.apps/spire-server 2>/dev/null oc delete scc "$SPIRE_SCC" 2>/dev/null oc delete sa "$SPIRE_SA" 2>/dev/null - oc delete route spire-server 2>/dev/null - oc delete route tornjak-http 2>/dev/null - oc delete route tornjak-mtls 2>/dev/null - oc delete route tornjak-tls 2>/dev/null + oc delete secret spire-secret tornjak-certs 2>/dev/null + oc delete cm spire-bundle spire-server oidc-discovery-provider 2>/dev/null + oc delete service spire-server spire-oidc tornjak-http tornjak-mtls tornjak-tls 2>/dev/null + oc delete route spire-server tornjak-http tornjak-mtls tornjak-tls oidc 2>/dev/null oc delete ingress spireingress 2>/dev/null #oc delete group $GROUPNAME --ignore-not-found=true #oc delete project "$PROJECT" 2>/dev/null @@ -108,11 +109,12 @@ installSpireServer(){ oc get projects | grep $PROJECT if [ "$?" != "0" ]; then echo "Project $PROJECT must be created first" - echo "oc new-project $PROJECT --description=\"My TSI Spire SERVER project on OpenShift\" 2> /dev/null" + echo "oc new-project $PROJECT --description=\"My TSI Spire SERVER project on OpenShift\" " exit 1 fi - oc -n $PROJECT get statefulset spire-server + # test if Tornjak already exists: + oc -n $PROJECT get statefulset spire-server 2>/dev/null if [ "$?" == "0" ]; then # check if spire-server project exists: echo "$PROJECT project already exists. "