diff --git a/README.md b/README.md index 52058472..1c6b2162 100644 --- a/README.md +++ b/README.md @@ -251,8 +251,6 @@ service, deployed in `tsi-vault` namespace in your cluster. ```console kubectl create ns tsi-vault kubectl -n tsi-vault create -f examples/vault/vault.yaml -service/tsi-vault created -deployment.apps/tsi-vault created ``` #### Obtain remote access to Vault service diff --git a/charts/spire/templates/NOTES.txt b/charts/spire/templates/NOTES.txt index c9aea092..1d94a68d 100644 --- a/charts/spire/templates/NOTES.txt +++ b/charts/spire/templates/NOTES.txt @@ -12,9 +12,10 @@ Universal Trusted Workload Identity Service has completed. {{- end }} SPIRE info: - Spire Server address: {{ .Values.spireAddress }}:{{ .Values.spirePort }} - Spire Agent image: {{ .Values.spireAgentImg }}:{{ .Values.spireVersion }} - Spire Registrar image: {{ .Values.spireRegistrarImg }}:{{ .Values.spireVersion }} + Spire Server address: {{ .Values.spireServer.address }}:{{ .Values.spireServer.port }} + Spire Agent image: {{ .Values.spireAgent.img }}:{{ .Values.spireVersion }} + Spire Registrar image: {{ .Values.spireRegistrar.img }}:{{ .Values.spireVersion }} + Spire Agent sockets: {{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }} To enable Workload Registrar, create an entry on Tornjak UI: diff --git a/charts/spire/templates/agent-configmap.tpl b/charts/spire/templates/agent-configmap.tpl index 03b991b2..e759b02f 100644 --- a/charts/spire/templates/agent-configmap.tpl +++ b/charts/spire/templates/agent-configmap.tpl @@ -8,22 +8,28 @@ data: agent { data_dir = "/run/spire" log_level = "DEBUG" - server_address = "{{ .Values.spireAddress }}" - server_port = "{{ .Values.spirePort }}" - socket_path = "/run/spire/sockets/agent.sock" + server_address = "{{ .Values.spireServer.address }}" + server_port = "{{ .Values.spireServer.port }}" + socket_path = "{{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }}" trust_bundle_path = 
"/run/spire/bundle/bundle.crt" trust_domain = "{{ .Values.trustdomain }}" } plugins { - NodeAttestor "k8s_psat" { - plugin_data { - cluster = "{{ .Values.clustername }}" - } - } {{- if .Values.aws }} NodeAttestor "aws_iid" { plugin_data {} } + {{- else if .Values.azure }} + NodeAttestor "azure_msi" { + plugin_data { + } + } + {{- else }} + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "{{ .Values.clustername }}" + } + } {{- end }} KeyManager "memory" { plugin_data { diff --git a/charts/spire/templates/agent-daemonset.tpl b/charts/spire/templates/agent-daemonset.tpl index d370a498..cbf91745 100644 --- a/charts/spire/templates/agent-daemonset.tpl +++ b/charts/spire/templates/agent-daemonset.tpl @@ -25,10 +25,10 @@ spec: # you prefer that waits for a service to be up. This image is built # from https://github.com/lqhl/wait-for-it image: gcr.io/spiffe-io/wait-for-it - args: ["-t", "30", "{{ .Values.spireAddress }}:{{ .Values.spirePort }}"] + args: ["-t", "30", "{{ .Values.spireServer.address }}:{{ .Values.spireServer.port }}"] containers: - name: spire-agent - image: {{ .Values.spireAgentImg }}:{{ .Values.spireVersion }} + image: {{ .Values.spireAgent.img }}:{{ .Values.spireVersion }} securityContext: # TODO: review this, maybe applicable for OpenShift only: # privilaged is needed to create socket and bundle files @@ -39,7 +39,7 @@ spec: mountPath: /run/spire/config readOnly: true - name: spire-agent-socket - mountPath: /run/spire/sockets + mountPath: {{ .Values.spireAgent.socketDir }} readOnly: false - name: spire-bundle mountPath: /run/spire/bundle @@ -52,7 +52,7 @@ spec: - /opt/spire/bin/spire-agent - healthcheck - -socketPath - - /run/spire/sockets/agent.sock + - {{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }} failureThreshold: 2 initialDelaySeconds: 15 periodSeconds: 60 @@ -66,7 +66,7 @@ spec: name: spire-bundle - name: spire-agent-socket hostPath: - path: /run/spire/sockets + path: {{ .Values.spireAgent.socketDir }} type: 
DirectoryOrCreate - name: spire-agent-token projected: diff --git a/charts/spire/templates/crd_role_binding.yaml b/charts/spire/templates/crd_role_binding.yaml index 46a8b770..5573a62c 100644 --- a/charts/spire/templates/crd_role_binding.yaml +++ b/charts/spire/templates/crd_role_binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: spire-k8s-registrar - namespace: spire + namespace: {{ .Values.namespace }} diff --git a/charts/spire/templates/k8s-workload-registrar-configmap.tpl b/charts/spire/templates/k8s-workload-registrar-configmap.tpl index 7c26aee8..9a07e958 100644 --- a/charts/spire/templates/k8s-workload-registrar-configmap.tpl +++ b/charts/spire/templates/k8s-workload-registrar-configmap.tpl @@ -10,8 +10,8 @@ data: trust_domain = "{{ .Values.trustdomain }}" # enable when direct socket access to SPIRE Server available: # server_socket_path = "/run/spire/sockets/registration.sock" - agent_socket_path = "/run/spire/sockets/agent.sock" - server_address = "{{ .Values.spireAddress }}:{{ .Values.spirePort }}" + agent_socket_path = "{{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }}" + server_address = "{{ .Values.spireServer.address }}:{{ .Values.spireServer.port }}" cluster = "{{ .Values.clustername }}" # enable for label based registration: # pod_label = "spire-workload-id" diff --git a/charts/spire/templates/k8s-workload-registrar-deploy.tpl b/charts/spire/templates/k8s-workload-registrar-deploy.tpl index 02818517..eaf2e26e 100644 --- a/charts/spire/templates/k8s-workload-registrar-deploy.tpl +++ b/charts/spire/templates/k8s-workload-registrar-deploy.tpl @@ -21,7 +21,7 @@ spec: containers: - name: k8s-workload-registrar #image: k8s-workload-registrar:latest - image: {{ .Values.spireRegistrarImg }}:{{ .Values.spireVersion }} + image: {{ .Values.spireRegistrar.img }}:{{ .Values.spireVersion }} imagePullPolicy: Always securityContext: # TODO: review this, maybe applicable for OpenShift only: @@ -32,7 +32,7 @@ spec: - 
/run/k8s-workload-registrar/config/registrar.conf volumeMounts: - name: spire-registrar-socket - mountPath: /run/spire/sockets + mountPath: {{ .Values.spireAgent.socketDir }} readOnly: false - name: k8s-workload-registrar-config mountPath: /run/k8s-workload-registrar/config @@ -40,7 +40,7 @@ spec: volumes: - name: spire-registrar-socket hostPath: - path: /run/spire/sockets + path: {{ .Values.spireAgent.socketDir }} type: DirectoryOrCreate - name: k8s-workload-registrar-config configMap: diff --git a/charts/spire/values.yaml b/charts/spire/values.yaml index 7ac228c0..d580707f 100644 --- a/charts/spire/values.yaml +++ b/charts/spire/values.yaml @@ -1,31 +1,45 @@ --- # Default values for spire. # This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# workaround since Azure doesn't support by default 10250 security port for kubelet -azure: false -# aws - enables the plugin for AWS NodeAttestor -aws: false -# openShift requires special configuration, including different security level -openShift: false # namespace - namespace for deploying SPIRE agents and workload registrar namespace: spire - -# SPIRE specific values -# clustername needs to match between spire-config, k8s-workload-registrar config, and the actual cluster name +# clustername needs to match between agent-config, k8s-workload-registrar config, +# and the actual cluster name clustername: spire-example # region - arbitrary label to describe the region region: sample-region -# trustdomain is arbitrary but needs to match between spire-config and k8s-workload-registrar config -# for multi-cluster support, trustdomain must be identical for all clusters and + +# SPIRE related elements +# trustdomain is arbitrary but needs to match between agent-config and +# k8s-workload-registrar config and SPIRE Server. 
+# For multi-cluster support, trustdomain must be identical for all clusters and # SPIRE server trustdomain: spiretest.com - -# SPIRE workload registrar values: -spireAddress: spire-server -spirePort: 443 -spireRegistrarImg: gcr.io/spiffe-io/k8s-workload-registrar -spireAgentImg: gcr.io/spiffe-io/spire-agent +# SPIRE version: spireVersion: 1.0.2 + +# spireServer - location of the SPIRE server +spireServer: + address: spire-server.appdomain.cloud + port: 443 + +# SPIRE Agent related configuration +spireAgent: + img: gcr.io/spiffe-io/spire-agent + # SPIRE Agent socket: + socketDir: /run/spire/sockets + socketFile: agent.sock + +# Workload Registrar configuration +spireRegistrar: + img: gcr.io/spiffe-io/k8s-workload-registrar + +# Additional configuration related to the platform +# azure - enables the plugin to suppor Azure platform, also +# workaround since Azure doesn't support by default 10250 security port for kubelet +azure: false +# aws - enables the plugin for AWS NodeAttestor +aws: false +# openShift requires special configuration, including different security level +openShift: false diff --git a/charts/tornjak/templates/NOTES.txt b/charts/tornjak/templates/NOTES.txt index 27913c51..97e087c8 100644 --- a/charts/tornjak/templates/NOTES.txt +++ b/charts/tornjak/templates/NOTES.txt @@ -6,42 +6,44 @@ Universal Trusted Workload Identity Service has completed. 
Cluster name: {{ .Values.clustername }} Trust Domain: {{ .Values.trustdomain }} - Tornjak Image: {{ .Values.tornjakImg }}:{{ .Values.spireVersion }} - SPIRE Server Socket: {{ .Values.spireServerSocket }} + Tornjak Image: {{ .Values.spireServer.img }}:{{ .Values.spireVersion }} + SPIRE Server socket: {{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }} {{- if .Values.openShift }} OpenShift mode: true {{- end }} -{{- if .Values.aws_iid -}} -{{- if .Values.aws_iid.access_key_id -}} -{{- if .Values.aws_iid.secret_access_key -}} +{{- if .Values.attestors.aws_iid -}} +{{- if .Values.attestors.aws_iid.access_key_id -}} +{{- if .Values.attestors.aws_iid.secret_access_key -}} Using NodeAttestor "aws_iid" with "access_key_id" and "secret_access_key" {{- end }} {{- end }} {{- end }} -{{- if .Values.OIDC.enable }} +{{- if .Values.oidc.enable }} OIDC enable: true - OIDC service name: {{ .Values.OIDC.serviceName }} - MY_DISCOVERY_DOMAIN: {{ .Values.OIDC.MY_DISCOVERY_DOMAIN }} + OIDC image: {{ .Values.oidc.image }} + OIDC service name: {{ .Values.oidc.serviceName }} + listen_socket_path: {{ .Values.oidc.socketDir }}/{{ .Values.oidc.socketFile }} + myDiscoveryDomain: {{ .Values.oidc.myDiscoveryDomain }} {{- end }} -{{- if .Values.k8s_psat.remoteClusters }} +{{- if .Values.attestors.k8s_psat.remoteClusters }} Multi-cluster support enabled. 
Make sure `kubeconfig` secret is created to support a following: clusters = { "{{ .Values.clustername }}" = { # use_token_review_api_validation = true - service_account_whitelist = ["spire:spire-agent"] + service_account_allow_list = ["spire:spire-agent"] }, - {{- range $k, $v := .Values.k8s_psat.remoteClusters }} + {{- range $k, $v := .Values.attestors.k8s_psat.remoteClusters }} "{{ $v.name }}" = { - service_account_whitelist = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] + service_account_allow_list = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] kube_config_file = "/run/spire/kubeconfigs/{{ $v.name }}" }, - {{- end }} + {{- end }} + } {{- end }} - } To learn more about the release, try: diff --git a/charts/tornjak/templates/oidc-dp-configmap.tpl b/charts/tornjak/templates/oidc-dp-configmap.tpl index f0d57422..303b105d 100644 --- a/charts/tornjak/templates/oidc-dp-configmap.tpl +++ b/charts/tornjak/templates/oidc-dp-configmap.tpl @@ -1,4 +1,4 @@ -{{- if .Values.OIDC.enable }} +{{- if .Values.oidc.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -6,12 +6,11 @@ metadata: namespace: {{ .Values.namespace }} data: oidc-discovery-provider.conf: | - log_level = "INFO" - domain = "{{ .Values.OIDC.serviceName }}.{{ .Values.OIDC.MY_DISCOVERY_DOMAIN }}" - listen_socket_path = "/run/oidc-discovery-provider/server.sock" - log_level = "info" + log_level = "debug" + domain = "{{ .Values.oidc.serviceName }}.{{ .Values.oidc.myDiscoveryDomain }}" + listen_socket_path = "{{ .Values.oidc.socketDir }}/{{ .Values.oidc.socketFile }}" server_api { - address = "unix:///{{ .Values.spireServerSocket }}" + address = "unix:///{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" } nginx.conf: | user root; diff --git a/charts/tornjak/templates/server-configmap.tpl b/charts/tornjak/templates/server-configmap.tpl index ced225e4..0c9796a3 100644 --- 
a/charts/tornjak/templates/server-configmap.tpl +++ b/charts/tornjak/templates/server-configmap.tpl @@ -18,14 +18,14 @@ data: data_dir = "/run/spire/data" log_level = "DEBUG" default_svid_ttl = "1h" - socket_path = "{{ .Values.spireServerSocket }}" + socket_path = "{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" -{{- if .Values.OIDC.enable }} + {{- if .Values.oidc.enable }} #AWS requires the use of RSA. EC cryptography is not supported ca_key_type = "rsa-2048" # Creates the iss claim in JWT-SVIDs. - jwt_issuer = "https://{{ .Values.OIDC.serviceName }}.{{ .Values.OIDC.MY_DISCOVERY_DOMAIN }}" + jwt_issuer = "https://{{ .Values.oidc.serviceName }}.{{ .Values.oidc.myDiscoveryDomain }}" experimental { // Turns on the bundle endpoint (required, true) @@ -37,7 +37,7 @@ data: // The port to listen on (optional, defaults to 443) bundle_endpoint_port = 8443 } -{{- end }} + {{- end }} ca_subject = { country = ["US"], @@ -59,10 +59,10 @@ data: # use_token_review_api_validation = true service_account_allow_list = ["spire:spire-agent"] }, - {{- if .Values.k8s_psat.remoteClusters }} - {{- range $k, $v := .Values.k8s_psat.remoteClusters }} + {{- if .Values.attestors.k8s_psat.remoteClusters }} + {{- range $k, $v := .Values.attestors.k8s_psat.remoteClusters }} "{{ $v.name }}" = { - service_account_whitelist = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] + service_account_allow_list = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] kube_config_file = "/run/spire/kubeconfigs/{{ $v.name }}" }, {{- end }} @@ -70,28 +70,46 @@ data: } } } - {{- if .Values.aws_iid -}} - {{- if .Values.aws_iid.access_key_id -}} - {{- if .Values.aws_iid.secret_access_key -}} + + {{- if .Values.attestors.aws_iid -}} + {{- if .Values.attestors.aws_iid.access_key_id -}} + {{- if .Values.attestors.aws_iid.secret_access_key -}} NodeAttestor "aws_iid" { plugin_data { - access_key_id = "{{- 
.Values.aws_iid.access_key_id -}}" - secret_access_key = "{{- .Values.aws_iid.secret_access_key -}}" - skip_block_device: {{- .Values.aws_iid.skip_block_device -}} + access_key_id = "{{- .Values.attestors.aws_iid.access_key_id -}}" + secret_access_key = "{{- .Values.attestors.aws_iid.secret_access_key -}}" + skip_block_device = {{- .Values.attestors.aws_iid.skip_block_device -}} } } + {{- end }} {{- end }} {{- end }} - NodeResolver "noop" { - plugin_data {} + + {{- if .Values.attestors.azure_msi -}} + {{- if .Values.attestors.azure_msi.tenants -}} + NodeAttestor "azure_msi" { + enabled = true + plugin_data { + tenants = { + // Tenant configured with the default resource id (i.e. the resource manager) + {{- range $k, $v := .Values.attestors.azure_msi.tenants }} + "{{ $v.tenant }}" = {}, + {{- end }} + } + } } + {{- end }} + {{- end }} + KeyManager "disk" { plugin_data { keys_path = "/run/spire/data/keys.json" } } - {{- if not .Values.selfSignedCA }} + + {{- if not .Values.spireServer }} + {{- if not .Values.spireServer.selfSignedCA }} UpstreamAuthority "disk" { plugin_data { ttl = "12h" @@ -100,6 +118,7 @@ data: } } {{- end }} + {{- end }} Notifier "k8sbundle" { plugin_data { # This plugin updates the bundle.crt value in the spire:spire-bundle diff --git a/charts/tornjak/templates/server-oidc-service.tpl b/charts/tornjak/templates/server-oidc-service.tpl index 355ff71b..380726a8 100644 --- a/charts/tornjak/templates/server-oidc-service.tpl +++ b/charts/tornjak/templates/server-oidc-service.tpl @@ -1,4 +1,4 @@ -{{- if .Values.OIDC.enable }} +{{- if .Values.oidc.enable }} # Service definition for the admission webhook apiVersion: v1 kind: Service diff --git a/charts/tornjak/templates/server-statefulset.tpl b/charts/tornjak/templates/server-statefulset.tpl index 7deca754..d7b619fb 100644 --- a/charts/tornjak/templates/server-statefulset.tpl +++ b/charts/tornjak/templates/server-statefulset.tpl @@ -21,8 +21,7 @@ spec: shareProcessNamespace: true containers: - name: 
spire-server - # image: gcr.io/spiffe-io/spire-server:0.11.0 - image: {{ .Values.tornjakImg }}:{{ .Values.spireVersion }} + image: {{ .Values.spireServer.img }}:{{ .Values.spireVersion }} imagePullPolicy: Always args: - -config @@ -43,48 +42,52 @@ spec: - name: certs mountPath: /opt/spire/sample-keys - name: spire-server-socket - mountPath: /run/spire/sockets + mountPath: {{ .Values.spireServer.socketDir }} readOnly: false -{{- if .Values.k8s_psat.remoteClusters }} + {{- if .Values.attestors.k8s_psat.remoteClusters }} - name: kubeconfigs mountPath: /run/spire/kubeconfigs -{{- end }} + {{- end }} livenessProbe: exec: command: - "/opt/spire/bin/spire-server" - "healthcheck" - "-socketPath" - - "{{ .Values.spireServerSocket }}" + - "{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" failureThreshold: 2 initialDelaySeconds: 15 periodSeconds: 60 timeoutSeconds: 3 -{{- if .Values.OIDC.enable }} + {{- if .Values.oidc.enable }} readinessProbe: exec: command: - "/opt/spire/bin/spire-server" - "healthcheck" - "-socketPath" - - "{{ .Values.spireServerSocket }}" + - "{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" - "--shallow" initialDelaySeconds: 5 periodSeconds: 10 -{{- end }} -{{- if .Values.OIDC.enable }} + {{- end }} + {{- if .Values.oidc.enable }} - name: spire-oidc - image: gcr.io/spiffe-io/oidc-discovery-provider:{{ .Values.spireVersion }} + image: {{ .Values.oidc.image }}:{{ .Values.spireVersion }} args: - -config - /run/spire/oidc/config/oidc-discovery-provider.conf ports: - containerPort: 443 name: spire-oidc-port + securityContext: + # privileged is needed to access mounted files (e.g. 
/run/spire/data) + # not needed if using volumeClaimTemplates and sockets + privileged: true volumeMounts: - name: spire-server-socket - mountPath: /run/spire/sockets - readOnly: true + mountPath: {{ .Values.spireServer.socketDir }} + # readOnly: true - name: spire-oidc-config mountPath: /run/spire/oidc/config/ readOnly: true @@ -92,7 +95,7 @@ spec: mountPath: /run/spire/data readOnly: false - name: spire-oidc-socket - mountPath: /run/oidc-discovery-provider/ + mountPath: {{ .Values.oidc.socketDir }} readinessProbe: exec: # TODO: This needs to be revisited. @@ -101,6 +104,7 @@ spec: command: ["/bin/ps", "aux", " ||", "grep", "oidc-discovery-provider -config /run/spire/oidc/config/oidc-discovery-provider.conf"] initialDelaySeconds: 5 periodSeconds: 5 + - name: nginx-oidc image: nginx:latest ports: @@ -117,8 +121,8 @@ spec: mountPath: /run/spire/oidc/config/ readOnly: true - name: spire-oidc-socket - mountPath: /run/oidc-discovery-provider/ -{{- end }} + mountPath: {{ .Values.oidc.socketDir }} + {{- end }} volumes: - name: spire-config configMap: @@ -127,29 +131,29 @@ spec: secret: defaultMode: 0400 secretName: tornjak-certs -{{- if .Values.k8s_psat.remoteClusters }} + {{- if .Values.attestors.k8s_psat.remoteClusters }} - name: kubeconfigs secret: defaultMode: 0400 secretName: kubeconfigs -{{- end }} -{{- if .Values.OIDC.enable }} + {{- end }} + {{- if .Values.oidc.enable }} - name: spire-server-socket hostPath: - path: {{ .Values.spireServerSocket }} + path: {{ .Values.spireServer.socketDir }} type: DirectoryOrCreate - name: spire-oidc-socket emptyDir: {} - name: spire-oidc-config configMap: name: oidc-discovery-provider -{{- else }} + {{- else }} - name: spire-server-socket emptyDir: {} - name: spire-entries configMap: name: spire-entries -{{- end }} + {{- end }} # remove if using volumeClaimTemplates - name: spire-data hostPath: diff --git a/charts/tornjak/values.yaml b/charts/tornjak/values.yaml index 6dc1d8c4..64eca7d5 100644 --- a/charts/tornjak/values.yaml +++ 
b/charts/tornjak/values.yaml @@ -1,58 +1,82 @@ --- -# Default values for spire. +# Default values for deployment of SPIRE Server with Tornjak # This is a YAML-formatted file. -# Declare variables to be passed into your templates. - +# namespace - for deploying the Server and Tornjak namespace: tornjak -selfSignedCA: true # openShift requires special configuration, including different security level openShift: false - -# SPIRE specific values -# clustername needs to match between spire-config, k8s-workload-registrar config, and the actual cluster name +# clustername is required to set up access for SPIRE agents deployed +# in the same cluster clustername: spire-example + +# SPIRE related elements # trustdomain is arbitrary but needs to match between Server and Agent trustdomain: spiretest.com +# SPIRE version used for consistency across components spireVersion: 1.0.2 -tornjakImg: ghcr.io/spiffe/tornjak-spire-server -spireServerSocket: /run/spire/sockets/registration.sock + +# SPIRE Server configuration +spireServer: + # tornjakImage - Tornjak with SPIRE Server + # TODO this is just a temporary image with several patches. It + # should be removed after the patches are available in the SPIRE main + # img: ghcr.io/spiffe/tornjak-spire-server + img: tsidentity/local-spire-server + socketDir: /run/spire-server/private + socketFile: api.sock + # selfSignedCA - SPIRE will create the self signed CA unless this value + # is set to 'false'. 
In this case, make sure the key is accessible by + # Server in "/run/spire/secret/bootstrap.key" and the certificate in + # "/run/spire/secret/bootstrap.crt" + selfSignedCA: true # Enable OIDC -OIDC: +oidc: # to enable support for OIDC, change the value to true enable: false serviceName: oidc-tornjak - # replace the MY_DISCOVERY_DOMAIN with the Ingress information + image: gcr.io/spiffe-io/oidc-discovery-provider + socketDir: /run/oidc-discovery-provider + socketFile: server.sock + # myDiscoveryDomain - replace the myDiscoveryDomain with the Ingress information # e.g. this could be an output of IBM Cloud command: # ibmcloud oc cluster get --cluster "$MY_CLUSTER" --output json | jq -r '.ingressHostname' - # MY_DISCOVERY_DOMAIN: MY_DISCOVERY_DOMAIN + myDiscoveryDomain: MY_DISCOVERY_DOMAIN + +attestors: + # k8s_psat - enables support for a single SPIRE server managing + # agents deployed in multiple, different clusters, with K8S PSAT NodeAttestor + # Prior to this deployment, create a secret that contains KUBECONFIG information + # for every remoteCluster. + # `kubectl config view --flatten > /tmp/kubeconfigs/` + # `kubectl config view --flatten > /tmp/kubeconfigs/` + # Then create a secret: + # `kubectl -n tornjak create secret generic kubeconfigs --from-file=/tmp/kubeconfigs` + # + # Provide "name" value(s). "namespace" and "serviceAccount" are optional + # default namespace value = "spire" + # default serviceAccount value = spire-agent + k8s_psat: + remoteClusters: + # - name: cluster1 + # namespace: spire + # serviceAccount: spire-agent + # - name: cluster2 + # - name: cluster3 + # namespace: spire + # serviceAccount: spire-agent -# k8s_psat - enables support for a single SPIRE server managing -# agents deployed in multiple, different clusters, with K8S PSAT NodeAttestor -# Prior to this deployment, create a secret that contains KUBECONFIG information -# for every remoteCluster. 
-# `kubectl config view --flatten > /tmp/kubeconfigs/` -# `kubectl config view --flatten > /tmp/kubeconfigs/` -# Then create a secret: -# `kubectl -n tornjak create secret generic kubeconfigs --from-file=/tmp/kubeconfigs` -# -# default namespace value = "spire" -# default serviceAccount value = spire-agent -k8s_psat: - remoteClusters: - # - name: cluster1 - # namespace: spire - # serviceAccount: spire-agent - # - name: cluster2 - # - name: cluster3 - # namespace: spire - # serviceAccount: spire-agent + # awd_iid - enables node attestion in AWS EKS. + # provide "access_key_id" and "secret_access_key" + # see complete documentation: https://github.com/spiffe/spire/blob/main/doc/plugin_server_nodeattestor_aws_iid.md + aws_iid: + # access_key_id: "ACCESS_KEY_ID" + # secret_access_key: "SECRET_ACCESS_KEY" + # skip_block_device: true -# awd_iid - enables node attestion in AWS EKS. -# provide "access_key_id" and "secret_access_key" -# see complete documentation: https://github.com/spiffe/spire/blob/main/doc/plugin_server_nodeattestor_aws_iid.md -# aws_iid: -# access_key_id: "ACCESS_KEY_ID" -# secret_access_key: "SECRET_ACCESS_KEY" -# skip_block_device: true + # azure_msi - enables node attestion using Azure MSI: + # see complete documentation: https://github.com/spiffe/spire/blob/main/doc/plugin_server_nodeattestor_azure_msi.md + azure_msi: + # tenants: + # - tenant: "TENANT_ID" diff --git a/demos/demo.deploy-init.sh b/demos/demo.deploy-init.sh index c506b2f3..faab0dc4 100755 --- a/demos/demo.deploy-init.sh +++ b/demos/demo.deploy-init.sh @@ -1,6 +1,7 @@ #!/bin/bash -# this script requires https://github.com/duglin/tools/tree/main/demoscript +# this script requires https://github.com/duglin/tools/tree/master/demoscript +# or https://github.com/mrsabath/tools/tree/master/demoscript declare DEMOFILE=~/workspace/tools/demoscript/demoscript if [ ! -f "$DEMOFILE" ]; then echo "$DEMOFILE does not exist." 
diff --git a/docs/README.md b/docs/README.md index e7b79891..ae89e1c4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3,9 +3,9 @@ This document describes the list of available documents to deploy and run various Tornjak demos. The demos are sorted by complexity, -starting with this simple ones and progressing into more complex. +starting with the simple ones and progressing into more complex. -We suggest running the demos in specified order. +We suggest running them in the specified order. ## Tornjak Deployment There are multiple ways to deploy Tornjak on Kubernetes. diff --git a/docs/spire-helm.md b/docs/spire-helm.md index 6f04fa91..e71c2187 100644 --- a/docs/spire-helm.md +++ b/docs/spire-helm.md @@ -83,12 +83,16 @@ minikube start --kubernetes-version=v1.20.2 Once the cluster is up and the `KUBECONFIG` is set, create the namespace to deploy Tornjak server. By default we use “tornjak” as namespace and "minikube" as the cluster name. ```console -export CLUSTERNAME=minikube +export CLUSTER_NAME=minikube export SPIRESERVER_NS=tornjak kubectl create ns $SPIRESERVER_NS ``` -## Helm Deployment +*IBMCloud Hint* +when running in IBMCloud, you can use a handy script to get cluster information +[utils/get-cluster-info.sh](../utils/get-cluster-info.sh) + +## Helm Deployment for Tornjak Now we should be ready to deploy the helm charts. 
This Helm chart requires several configuration parameters: * clustername - name of the cluster (required) * trustdomain - must match between SPIRE server and agents (required) @@ -102,7 +106,7 @@ helm inspect values charts/tornjak/ ### Helm installation execution Sample execution: ```console -helm install --set "namespace=tornjak" --set "clustername=$CLUSTERNAME" --set "trustdomain=openshift.space-x.com" tornjak charts/tornjak --debug +helm install --set "namespace=tornjak" --set "clustername=$CLUSTER_NAME" --set "trustdomain=openshift.space-x.com" tornjak charts/tornjak --debug ``` Let's review the Tornjak deployment: @@ -229,6 +233,7 @@ Only ONE instance of SPIRE Agent deployment should be running at once, as it runs as a daemonset on all the node. Running more than one may result in conflicts. +### Create a namespace First, create a namespace where we want to deploy our SPIRE agents. For the purpose of this tutorial, we will use “spire”. ```console @@ -265,6 +270,8 @@ In every cluster hosting SPIRE agents, including remote cluster, create `spire- kubectl -n spire apply -f spire-bundle.yaml ``` --- + +### Set up access to the SPIRE server In the next step, we need to setup a public access to the SPIRE Server, so SPIRE agents can access it. This is typically the Ingress value defined during the SPIRE Server deployment, @@ -306,6 +313,7 @@ Assuming the SPIRE server can now be accessed from the `spire` namespace, either via Ingress or Service on port 8081, we can deploy the helm charts. +## Helm Deployment for Spire Agents We continue using the same cluster name “minikube”, trust domain “openshift.space-x.com” and region "us-east". @@ -313,9 +321,9 @@ and region "us-east". Use `--debug` flag to show additional information about the helm deployment. 
```console -helm install --set "spireAddress=$SPIRE_SERVER" \ ---set "spirePort=$SPIRE_PORT" --set "namespace=$AGENT_NS" \ ---set "clustername=$CLUSTERNAME" --set "region=us-east" \ +helm install --set "spireServer.address=$SPIRE_SERVER" \ +--set "spireServer.port=$SPIRE_PORT" --set "namespace=$AGENT_NS" \ +--set "clustername=$CLUSTER_NAME" --set "region=us-east" \ --set "trustdomain=openshift.space-x.com" \ spire charts/spire --debug ``` @@ -422,17 +430,17 @@ utils/createKeys.sh ``` For our example, this is: ```console -utils/createKeys.sh sample-keys/ $CLUSTERNAME $INGRESS-DOMAIN-NAME +utils/createKeys.sh sample-keys/ $CLUSTER_NAME $INGRESS-DOMAIN-NAME ``` Create a secret that is using the generated key and certificates: ``` kubectl -n tornjak create secret generic tornjak-certs \ ---from-file=key.pem="sample-keys/$CLUSTERNAME.key" \ ---from-file=cert.pem="sample-keys/$CLUSTERNAME.crt" \ ---from-file=tls.pem="sample-keys/$CLUSTERNAME.crt" \ ---from-file=mtls.pem="sample-keys/$CLUSTERNAME.crt" +--from-file=key.pem="sample-keys/$CLUSTER_NAME.key" \ +--from-file=cert.pem="sample-keys/$CLUSTER_NAME.crt" \ +--from-file=tls.pem="sample-keys/$CLUSTER_NAME.crt" \ +--from-file=mtls.pem="sample-keys/$CLUSTER_NAME.crt" ``` Then just simply restart the spire server by killing the **spire-server-0** pod diff --git a/docs/spire-hints.md b/docs/spire-hints.md new file mode 100644 index 00000000..3235a5e2 --- /dev/null +++ b/docs/spire-hints.md @@ -0,0 +1,153 @@ +# Debugging, Hints and Tips for Solving Common Problems +Here is a collection of various tips and hints for debugging +Universal Workload Identity deployment +with SPIRE and Tornjak + +The hints collection is grouped in the following sections: +* [SPIRE Agents](#spire-agents) +* [Workload Registrar](#workload-registrar) +* [SPIRE Server](#spire-server) + +## SPIRE Agents + +**Problem:** + +Agent log file shows an error: +``` +time="2021-10-01T15:26:14Z" level=info msg="SVID is not found. 
Starting node attestation" subsystem_name=attestor trust_domain_id="spiffe://openshift.space-x.com" +time="2021-10-01T15:26:44Z" level=error msg="Agent crashed" error="create attestation client: failed to dial dns:///spire-server-tornjak.space-x05-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-east.containers.appdomain.cloud:443: context deadline exceeded: connection error: desc = \"transport: authentication handshake failed: x509svid: could not verify leaf certificate: x509: certificate signed by unknown authority (possibly because of \\\"crypto/rsa: verification error\\\" while trying to verify candidate authority certificate \\\"SPIFFE\\\")\"" +``` + +**Description:** + +Incorrect keys or certificates required for attestation. +Either `spire-bundle` needs to be refreshed or the `kubeconfigs` +secret updated on the SPIRE server. + +**Solution:** +To update the "spire-bundle", +get the `spire-bundle` configmap from the SPIRE server, update the namespace to match the agent cluster, then deploy it agent namespace. + +On the server: +```console +kubectl -n tornjak get configmap spire-bundle -oyaml | kubectl patch --type json --patch '[{"op": "replace", "path": "/metadata/namespace", "value":"spire"}]' -f - --dry-run=client -oyaml > spire-bundle.yaml +``` + +On the agent cluster: +```console +kubectl -n spire create -f spire-bundle.yaml +``` + +In case of the remote clusters, follow steps outline [here](./spire-multi-cluster.md#enable-kubernetes-attestor) + +There is no need to restart the agents. +Once the updated `spire-bundle` is in place +the agents will pick up the changes on the next restart. 
+ +--- +**Problem:** + +Agent log file shows an error: +``` +time="2021-10-05T18:33:55Z" level=error msg="Agent crashed" error="failed to receive attestation response: rpc error: code = Internal desc = nodeattestor(k8s_psat): unable to validate token with TokenReview API: unable to query token review API: Post \"https://c113.us-south.containers.cloud.ibm.com:31396/apis/authentication.k8s.io/v1/tokenreviews\": failed to refresh token: oauth2: cannot fetch token: 400 Bad Request\nResponse: {\"errorCode\":\"BXNIM0408E\",\"errorMessage\":\"Provided refresh token is expired\",\"context\":{\"requestId\":\"aWFtaWQtNi4xMC0xMTc5Ny1lMWY0MWI0LTc4NThmYzY2OWMtbGN4aGQ-a8b3fd9f52ed44cf9e48b274baf0f47f\",\"requestType\":\"incoming.Identity_Token\",\"userAgent\":\"Go-http-client/2.0\",\"url\":\"https://identity-3.us-south.iam.cloud.ibm.com\",\"instanceId\":\"iamid-6.10-11797-e1f41b4-7858fc669c-lcxhd\",\"threadId\":\"5093cf\",\"host\":\"iamid-6.10-11797-e1f41b4-7858fc669c-lcxhd\",\"startTime\":\"05.10.2021 18:33:55:376 UTC\",\"endTime\":\"05.10.2021 18:33:55:386 UTC\",\"elapsedTime\":\"10\",\"locale\":\"en_US\",\"clusterName\":\"iam-id-prod-us-south-dal13\"}}" +``` + +**Description:** + +Credential used in `kubeconfigs` secret expired for this cluster. 
+ +**Solution:** + +Refresh the KUBECONFIG creds in the secret as described [here](./spire-multi-cluster.md#enable-kubernetes-attestor) + +--- + +**Problem:** + +**Description:** + +**Solution:** + +--- + +## Workload Registrar + +**Problem:** +The workload registrar log shows an error: +``` +time="2021-10-01T16:58:15Z" level=debug msg="Watching X.509 contexts" +time="2021-10-01T16:58:15Z" level=error msg="Failed to watch the Workload API: rpc error: code = PermissionDenied desc = no identity issued" +time="2021-10-01T16:58:15Z" level=debug msg="Retrying watch in 30s" +``` +**Description:** +The Workload Registrar cannot obtain its own identity because its instance was either: +* never registered with the SPIRE Server +* does not have appropriate admin permissions required to write into the SPIRE Server +* re-created on a different node, with a different Parent ID, than the initial instance, so it needs to be re-registered. + +**Solution:** +Register the current instance of the Workload Registrar with the SPIRE Server. +See the [documentation](./spire-workload-registrar.md#register-workload-registrar-with-the-spire-server) + +--- + +**Problem:** + +The workload registrar log shows an error: +``` +E1001 17:00:27.808343 17 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.2/tools/cache/reflector.go:125: Failed to list *v1beta1.SpiffeID: the server could not find the requested resource (get spiffeids.spiffeid.spiffe.io) +``` + +**Description:** + +During the previous execution of +`helm uninstall` or `utils/insta-open-shift-x.sh --clean` +there were active `spiffeid` objects that could not be deleted. +Now, the successfully executing Workload Registrar operator was able to delete +the stale `spiffeid` records and then +`spiffeids.spiffeid.spiffe.io` CRD was finalized +and eventually removed. +Now it needs to be recreated. 
+ +**Solution:** + +Either re-run the agents installation, +or create the CRD manually: + +```console +kubectl -n spire create -f charts/spire/templates/spiffeid.spiffe.io_spiffeids.yaml +``` +--- +**Problem:** + +The workload registrar log shows an error: +``` +time="2021-10-01T16:50:45Z" level=error msg="Failed to watch the Workload API: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /run/spire/sockets/agent.sock: connect: no such file or directory\"" +``` +**Description:** + +The Workload Registrar cannot connect to the SPIRE agent. + +**Solution:** + +Check that the SPIRE agent is running on the same node as the Workload Registrar. Fix the SPIRE agent. If the agent was restarted, +give it a minute or two; the connection should be recreated. +Make sure the permissions for accessing the socket are correct. + +--- + +**Problem:** + +**Description:** + +**Solution:** + +--- +## SPIRE Server +**Problem:** + +**Description:** + +**Solution:** + +--- diff --git a/docs/spire-multi-cluster.md b/docs/spire-multi-cluster.md index 3d0161d8..6cf6e1d0 100644 --- a/docs/spire-multi-cluster.md +++ b/docs/spire-multi-cluster.md @@ -42,6 +42,7 @@ depending on the needs. 
--- ### Enable AWS node attestor +* Tornjak/SPIRE server To use AWS node_attestor, we need to provide the following values in [Tornjak helm chart configuration file](../charts/tornjak/values.yaml): ```yaml @@ -55,6 +56,23 @@ Procedures for obtaining these values are [here](https://docs.aws.amazon.com/gen For more information about this plugin configuration see the [attestor documentation](https://github.com/spiffe/spire/blob/main/doc/plugin_server_nodeattestor_aws_iid.md) +* SPIRE Agents: +To use AWS node_attestor, we need to provide the following value in +[spire helm chart configuration file](../charts/spire/values.yaml): +```yaml +aws: true +``` + +Or add the `--set "aws=true"` flag to the helm command: +```console +helm install --set "spireServer.address=$SPIRE_SERVER" \ +--set "spireServer.port=$SPIRE_PORT" --set "namespace=$AGENT_NS" \ +--set "clustername=$CLUSTERNAME" --set "region=us-east" \ +--set "trustdomain=openshift.space-x.com" \ +--set "aws=true" \ +spire charts/spire --debug +``` + --- ### Enable Azure node attestor **TBD** @@ -108,6 +126,16 @@ kubectl -n tornjak create secret generic kubeconfigs --from-file=/tmp/kubeconfig ``` This has to be done before executing the Helm deployment. +If you ever need to update the existing credentials, +create new files, and then run: + +```console +kubectl -n tornjak create secret generic kubeconfigs --from-file=/tmp/kubeconfigs --save-config --dry-run=client -o yaml | kubectl -n tornjak apply -f - +``` + +This change requires SPIRE server restart, but not the agents. + + #### Step 1b. Update the Tornjak helm charts Once the secret is created, we need to update the helm charts to support the Kuberenetes attestor (`k8s_psat`). @@ -153,9 +181,6 @@ Agents deployment. --- - - - ## Adding multi-cluster support to an existing deployment This part describes the steps required to extend the existing Tornjak/SPIRE Server to support multiple clusters. 
@@ -173,6 +198,20 @@ whether cloud specific or Kubernetes one `k8s_psat`. ```console kubectl -n tornjak edit configmap spire-server ``` +Here is a sample configuration: + +```yaml +NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "tsi-kube01" = { + service_account_allow_list = ["spire:spire-agent"] + kube_config_file = "/run/spire/kubeconfigs/tsi-kube01" + } + } + } +} +``` * If using the secret for `k8s_psat` attestor, modify the SPIRE Server StatefulSet deployment Here we want to make the KUBECONFIGs available to the SPIRE server. diff --git a/docs/spire-oidc-aws-s3.md b/docs/spire-oidc-aws-s3.md index 74bfe35d..f5340595 100644 --- a/docs/spire-oidc-aws-s3.md +++ b/docs/spire-oidc-aws-s3.md @@ -104,46 +104,56 @@ The IAM role contains the connection parameters for the OIDC federation to AWS s 5. Click **Next: Review** to skip the **Add Tags** screen. -6. Type the name `oidc-federation-test-role` for the IAM role and click **Create role**. +6. Type the name `oidc-federation-spacex-demo-role` for the IAM role and click **Create role**. ### Add the SPIFFE ID to the IAM Role To allow the workload from outside AWS to access AWS S3, add the workload’s SPIFFE ID to the IAM role. This restricts access to the IAM role to JWT SVIDs with the specified SPIFFE ID. -1. Click **Roles** on the left, use the search field to find the `oidc-federation-test-role` IAM role that you created in the last section, and click the role. +1. Click **Roles** on the left, use the search field to find the `oidc-federation-spacex-demo-role` IAM role that you created in the last section, and click the role. 2. At the top of the **Summary** page, next to **Role ARN**, copy the role ARN into the clipboard by clicking the small icon at the end. Save the ARN in a file such as `oidc-arn.txt` for use in the testing section. 3. Click the **Trust relationships** tab near the middle of the page and then click **Edit trust relationship**. -4. 
In the JSON access control policy, modify a condition line at the end of the `StringEquals` attribute to restrict access to workloads matching the workload SPIFFE ID that we will be using for testing. The new code is: +4. In the JSON access control policy, modify a condition line at the end of the `StringEquals` attribute to restrict access to workloads matching the workload SPIFFE ID that we will be using for testing. +The sample code is: ``` + . . . "Action": "sts:AssumeRoleWithWebIdentity", "Condition": { "ForAllValues:StringLike": { - "oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud:sub": "spiffe://openshift.space-x.com/eu-*/*/*/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b", + "oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud:sub": "spiffe://openshift.space-x.com/region/*/cluster_name/*/ns/*/sa/elon-musk/pod_name/mars-mission-*", "oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud:aud": "mys3" } } ``` - This policy states that only containers with `openshift.space-x.com` trust domain, deployed in Europe, running on `elon-mask` service account with `mars-mission-main` container can access this S3 bucket. + This policy states that only containers with `openshift.space-x.com` trust domain, + deployed in any `region`, in any `cluster_name` and in any `namespace`, + but only using **elon-musk** `serviceAccount` and pod that starts with name **mars-mission** + can access this S3 bucket. 5. Click **Update Trust Policy**. This change to the IAM role takes a minute or two to propagate. ## Test Access to AWS S3 -We are going to start a container in our SPIRE environment. This container has AWS S3 as well as SPIRE agent binaries for running the experiment. +We are going to start a container in our SPIRE environment. 
+This container already has AWS S3 cli as well as SPIRE agent binaries +for running this experiment. -Additionally, the test deployment file [examples/spire/mars-spaceX.yaml](examples/spire/mars-spaceX.yaml) has an annotation like this: +Additionally, the test deployment file [examples/spire/mars-spaceX.yaml](examples/spire/mars-spaceX.yaml) has a following label: ```yaml -metadata: - annotations: - spire-workload-id: eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b - +template: + metadata: + labels: + identity_template: "true" +``` +This label indicates that pod will have a identity in the format: +``` +identity_template = "{{ "region/{{.Context.Region}}/cluster_name/{{.Context.ClusterName}}/ns/{{.Pod.Namespace}}/sa/{{.Pod.ServiceAccount}}/pod_name/{{.Pod.Name}}" }}" ``` -This represent a sample identity of this container. Just for the demo purpose. Start the test container in `default` namespace. ``` @@ -165,7 +175,7 @@ requesting **audience** for `mys3`: bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock ``` -Get the long JWT token that follows the `token(spiffe://openshift.space-x.com/eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b):` +Get the long JWT token that follows the `token(spiffe://openshift.space-x.com/region/us-east/cluster_name/space-x03/ns/default/sa/elon-musk/pod_name/mars-mission-7874fd667c-rhq9d):` and save it in the file `token.jwt`. 
Now, build the AWS request, where: @@ -176,7 +186,7 @@ Now, build the AWS request, where: In our example this would be: ```console -AWS_ROLE_ARN=arn:aws:iam::581274594392:role/oidc-federation-test-role-space-x-01 AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://tsi-spire-bucket/test.txt secret-file.txt +AWS_ROLE_ARN=arn:aws:iam::581274594392:role/oidc-federation-spacex-demo-role AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://tsi-spire-bucket/test.txt secret-file.txt ``` If everything went fine, we should now have a local file `secret-file.txt` that diff --git a/docs/spire-oidc-tutorial.md b/docs/spire-oidc-tutorial.md index 13e307db..e0451bff 100644 --- a/docs/spire-oidc-tutorial.md +++ b/docs/spire-oidc-tutorial.md @@ -9,13 +9,9 @@ In this example we will deploy Tornjak and SPIRE server on OpenShift in IBM Clou Follow the documentation to deploy [Tornjak on Openshift](./spire-on-openshift.md#deploy-on-openshift]) with exception of enabling the `--oidc` flag: -```console -# check if rootCA is present: -ls sample-keys/CA -rootCA.crt rootCA.key rootCA.srl - +``` # install: -utils/install-open-shift-tornjak.sh -c -t -p --oidc +utils/install-open-shift-tornjak.sh -c $CLUSTER_NAME -t $TRUST_DOMAIN -p $PROJECT_NAME --oidc ``` for example: @@ -65,6 +61,9 @@ This output confirms that the OIDC endpoint is accessible and responds with vali Let's install the [SPIRE Agents](./spire-on-openshift.md#step-2-installing-spire-agents-on-openshift): ``` +oc new-project spire --description="My TSI Spire Agent project on OpenShift" +kubectl get configmap spire-bundle -n tornjak -o yaml | sed "s/namespace: tornjak/namespace: spire/" | kubectl apply -n spire -f - + export SPIRE_SERVER=spire-server-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud utils/install-open-shift-spire.sh -c space-x.01 -s $SPIRE_SERVER -t openshift.space-x.com diff --git a/docs/spire-oidc-vault.md b/docs/spire-oidc-vault.md index 222a8b99..98d9e0af 100644 --- 
a/docs/spire-oidc-vault.md +++ b/docs/spire-oidc-vault.md @@ -2,8 +2,8 @@ In this tutorial we show how to use SPIRE and OIDC to authenticate workloads to retrieve Vault secrets. -This tutorial is based on the documentation for [Using SPIRE JWT-SVIDs to Authenticate -to Hashicorp Vault](https://spiffe.io/docs/latest/keyless/vault/readme/) +This tutorial is based on the SPIFFE documentation [("Using SPIRE JWT-SVIDs to Authenticate +to Hashicorp Vault")](https://spiffe.io/docs/latest/keyless/vault/readme/) This part of the tutorial assumes that OIDC is already [enabled on SPIRE](./spire-oidc-tutorial.md) @@ -28,6 +28,7 @@ vault login -no-print "${ROOT_TOKEN}" ## Configure a Vault instace: We have a script [examples/spire/vault-oidc.sh](../examples/spire/vault-oidc.sh) that configures the Vault instance with the required demo configuration, but before we run it, let's first explain what happens. +**All the commands listed here are in the script, so don't run them!** First few commands enable the Secret Engine and setup Vault OIDC Federation with our instance of SPIRE. 
@@ -43,7 +44,7 @@ vault auth enable jwt Set up our OIDC Discovery URL, using the values created in [OIDC tutorial setup](./spire-oidc-tutorial.md) and using defalt role **dev**: ``` -vault write auth/jwt/config oidc_discovery_url=$SPIRE_SERVER default_role=“dev” +vault write auth/jwt/config oidc_discovery_url=$OIDC_URL default_role=“dev” ``` Define a policy `my-dev-policy` that gives `read` access to `my-super-secret`: @@ -70,14 +71,14 @@ cat > role.json < ``` +Here is our example: +```console +examples/spire/vault-oidc.sh https://oidc-tornjak.space-x01-9d995c4a8c7c5f281ce13d546a94-0000.us-east.containers.appdomain.cloud $ROOT_TOKEN $VAULT_ADDR +``` + -Now, create a test secret value: +Once the script successfully completes, +create a test secret value: ```console vault kv put secret/my-super-secret test=123 ``` @@ -105,20 +112,22 @@ vault kv put secret/my-super-secret test=123 For testing this setup we are going to use [examples/spire/mars-spaceX.yaml](examples/spire/mars-spaceX.yaml) deployment. -Based on the following annotation: +Make sure the pod label matches the label in The Workload Registrar Configuration. 
```yaml -metadata: - annotations: - spire-workload-id: eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b +template: + metadata: + labels: + identity_template: "true" + app: mars-mission ``` -this container will get the following identity: +this container will get the identity that might look like this: -`eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b` +`spiffe://openshift.space-x.com/region/us-east/cluster_name/space-x.01/ns/default/sa/elon-musk/pod_name/mars-mission-7874fd667c-rchk5` -Let's create a container and get inside: +Let's create a pod and get inside the container: ```console kubectl -n default create -f examples/spire/mars-spaceX.yaml @@ -148,7 +157,7 @@ The JWT token is the long string that follows the **token**: ```console bin/spire-agent api fetch jwt -audience vault -socketPath /run/spire/sock ets/agent.sock -token(spiffe://openshift.space-x.com/eu-de/space-x.01/default/elon-musk/mars-mission-main/c0d076b51c28dc937a70a469b4cc946fb465ab6c86d6ae89ae2cf8eac1f55d6b): +token(spiffe://openshift.space-x.com/region/us-east/cluster_name/space-x.01/ns/default/sa/elon-musk/pod_name/mars-mission-7874fd667c-rchk5): eyJhbGciOiJSUzI1NiIs....cy46fb465a ``` @@ -161,7 +170,7 @@ Export also `eurole` as **ROLE** and actual **VAULT_ADDR** ```console export ROLE=eurole -export VAULT_ADDR=http://tsi-kube01-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud +export VAULT_ADDR=http://tsi-vault-tsi-vault.space-x01-9d995c4a8c7c5f281ce13d546a94-0000.us-east.containers.appdomain.cloud ``` Now let's try to login to Vault using the JWT token: diff --git a/docs/spire-workload-registrar.md b/docs/spire-workload-registrar.md index 41fdea1d..6cf26ddc 100644 --- a/docs/spire-workload-registrar.md +++ b/docs/spire-workload-registrar.md @@ -60,7 +60,9 @@ metadata: labels: identity_template: "true" ``` -will get their identity. 
+will get their identity. More information about the `identity_template` is +available in the SPIRE community document about the +[workload-registrar](https://github.com/spiffe/spire/blob/main/support/k8s/k8s-workload-registrar/mode-crd/README.md#identity-template-based-workload-registration) ## Register Workload Registrar with the SPIRE server. Workload Registrar will use its own identity to register other elements @@ -84,7 +86,8 @@ This requires a few steps: ``` In this case the registrar is running on the node `10.38.240.214` - There are 2 methods to register Workload Registrar: + +There are currently 2 methods to register Workload Registrar: ### Register Workload Registrar using Tornjak UI * Connect to Tornjak server UI, and list the agents. @@ -98,7 +101,7 @@ Get the SVID (SPIFFE ID) of the agent running on the specific node (as above). Y ``` - Under Selectors Recommendation, select the `selectors` suggested by the installation under `Selectors Recommendation`. For example, if the installation suggests the following: ``` - k8s:sa:spire-k8s-registrar,k8s:ns:spire,k8s:container-name:k8s-workload-registrar + k8s:sa:spire-k8s-registrar, k8s:ns:spire, k8s:container-name:k8s-workload-registrar ``` check off `k8s:sa`, `k8s:ns`, `k8s:container-name`. Then under `Selectors`, fill in the suggested values. - Make sure to check the `Admin Flag`, so the registrar gets enough permissions to create new entries. 
diff --git a/examples/spire/Dockerfile.mars b/examples/spire/Dockerfile.mars new file mode 100644 index 00000000..095ad6ba --- /dev/null +++ b/examples/spire/Dockerfile.mars @@ -0,0 +1,51 @@ +FROM ubuntu:18.04 + +RUN apt update && \ + apt install -y curl && \ + apt install coreutils && \ + apt install -y wget && \ + apt install -y unzip && \ + apt install -y jq && \ + apt install -y vim + +# install yq required for xform YAML to JSON +RUN apt-get install -y software-properties-common && \ + add-apt-repository ppa:rmescandon/yq && \ + apt update && apt install -y yq + +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ + chmod +x kubectl +RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ + unzip vault_1.4.2_linux_amd64.zip && \ + mkdir -p /usr/local/bin/ && \ + mv vault /usr/local/bin/ && \ + rm -f vault_1.4.2_linux_amd64.zip + +# get a demo script from https://github.com/duglin/tools/tree/master/demoscript +# or https://github.com/mrsabath/tools/tree/master/demoscript +RUN curl -LO https://raw.githubusercontent.com/mrsabath/tools/master/demoscript/demoscript && \ + chmod +x demoscript && \ + mv demoscript /usr/local/bin + +COPY demo.mars.sh demo.sh + +# adding Spire agent +RUN VERSION=1.0.2 && \ + wget https://github.com/spiffe/spire/releases/download/v${VERSION}/spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ + tar zvxf spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ + mkdir -p /opt/spire/bin && \ + mv /spire-${VERSION}/bin/spire-agent /opt/spire/bin/ && \ + rm -rf spire-${VERSION}/ && \ + rm -f spire-${VERSION}-linux-x86_64-glibc.tar.gz + +# add AWS CLI +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install && \ + rm -rf aws && \ + rm -f awscliv2.zip + +RUN cd /root + +# run it forever +CMD ["/bin/bash", "-c", "tail -f 
/dev/null"] diff --git a/examples/spire/Makefile b/examples/spire/Makefile new file mode 100644 index 00000000..4a890ec1 --- /dev/null +++ b/examples/spire/Makefile @@ -0,0 +1,27 @@ +.PHONY: bin/mars container-mars + +GIT_COMMIT_SHA="$(shell git rev-parse --short HEAD 2>/dev/null)" +REPO ?= tsidentity +MARS_IMG_NAME ?= mars-demo +VERSION=$(shell cat ../../tsi-version.txt) +# GO_FILES := $(shell find . -type f -name '*.go' -not -name '*_test.go' -not -path './vendor/*') + +MARS_IMG := $(REPO)/$(MARS_IMG_NAME):$(GIT_COMMIT_SHA) +MARS_IMG_MUTABLE := $(REPO)/$(MARS_IMG_NAME):$(VERSION) +MARS_IMG_LATEST := $(REPO)/$(MARS_IMG_NAME):latest + +all: bin/mars container-mars + +bin/mars: + docker build --no-cache -t $(MARS_IMG) -f Dockerfile.mars . + docker tag $(MARS_IMG) $(MARS_IMG_MUTABLE) + docker tag $(MARS_IMG) $(MARS_IMG_LATEST) + +container-mars: + docker push $(MARS_IMG) + docker push $(MARS_IMG_MUTABLE) + docker push $(MARS_IMG_LATEST) + +# vendor: +# go mod tidy +# go mod vendor diff --git a/examples/spire/demo.mars.sh b/examples/spire/demo.mars.sh new file mode 100755 index 00000000..ee98b428 --- /dev/null +++ b/examples/spire/demo.mars.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# this script requires https://github.com/duglin/tools/tree/main/demoscript +declare DEMOFILE=/usr/local/bin/demoscript +if [ ! -f "$DEMOFILE" ]; then + echo "$DEMOFILE does not exist." 
+ exit 1 +fi +source ${DEMOFILE} + +# bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock +# vi token.jwt # get JWT token +# bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock | sed -n '2p' | x +# args > token.jwt +# +# +# AWS_ROLE_ARN=arn:aws:iam::581274594392:role/mars-mission-role AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://mars-spire/mars.txt top-secret.txt + +doit "/opt/spire/bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock" +doit --noexec "/opt/spire/bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock | sed -n '2p' | xargs > token.jwt" +/opt/spire/bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock | sed -n '2p' | xargs > token.jwt +# doit cat token.jwt +doit AWS_ROLE_ARN=arn:aws:iam::581274594392:role/mars-mission-role AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://mars-spire/mars.txt top-secret.txt +doit cat top-secret.txt diff --git a/examples/spire/mars-demo.yaml b/examples/spire/mars-demo.yaml new file mode 100644 index 00000000..6286fcc0 --- /dev/null +++ b/examples/spire/mars-demo.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: elon-musk +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mars-mission + labels: + app: mars-mission +spec: + replicas: 1 + selector: + matchLabels: + app: mars-mission + template: + metadata: + labels: + identity_template: "true" + app: mars-mission + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: elon-musk + containers: + - name: mars-mission-main + securityContext: + # privilaged is needed to create socket and bundle files + privileged: true + image: tsidentity/mars-demo:latest + command: ["sleep"] + args: ["1000000000"] + volumeMounts: + - name: spire-agent-socket + mountPath: /run/spire/sockets + readOnly: true + volumes: + - name: 
spire-agent-socket + hostPath: + path: /run/spire/sockets + type: Directory diff --git a/examples/spire/mars-spaceX1.yaml b/examples/spire/mars-spaceX1.yaml index 6f9aec2b..036032e9 100644 --- a/examples/spire/mars-spaceX1.yaml +++ b/examples/spire/mars-spaceX1.yaml @@ -12,6 +12,7 @@ spec: template: metadata: labels: + identity_template: "true" app: mars-mission1 spec: containers: diff --git a/examples/spire/vault-oidc.sh b/examples/spire/vault-oidc.sh index 11cde31d..3987d280 100755 --- a/examples/spire/vault-oidc.sh +++ b/examples/spire/vault-oidc.sh @@ -13,9 +13,9 @@ helpme() { cat < +Syntax: ${0} Where: - OIDC URL - OIDC URL (https://) (optional, if set as env. var) + OIDC_URL - OIDC URL (https://) (optional, if set as env. var) ROOT_TOKEN - Vault root token to setup the plugin (optional, if set as env. var) VAULT_ADDR - Vault address in format http://vault.server:8200 (optional, if set as env. var) @@ -73,15 +73,15 @@ EOF "bound_audiences": "vault", "bound_claims_type": "glob", "bound_claims": { - "sub":"spiffe://openshift.space-x.com/eu-*/*/*/elon-musk/mars-mission-main/*" + "sub":"spiffe://openshift.space-x.com/region/*/cluster_name/*/ns/*/sa/elon-musk/pod_name/mars-mission-*" }, "token_ttl": "24h", "token_policies": "my-dev-policy" } EOF - vault write auth/jwt/role/eurole -/dev/null if [ "$?" != "0" ]; then echo "Project $PROJECT must be created first" - echo "oc new-project $PROJECT --description=\"My TSI Spire Agent project on OpenShift\" 2> /dev/null" + echo "oc new-project $PROJECT --description=\"My TSI Spire Agent project on OpenShift\" " exit 1 fi - oc -n $PROJECT get scc $SPIREAG_SCC + oc -n $PROJECT get scc $SPIREAG_SCC 2>/dev/null if [ "$?" == "0" ]; then # check if spire-agent project exists: echo "SPIRE Agent environment in $PROJECT project already exists. 
" @@ -128,16 +128,17 @@ installSpireAgent(){ cleanup # while (oc get projects | grep -v spire-server | grep "$PROJECT"); do echo "Waiting for $PROJECT removal to complete"; sleep 2; done # oc new-project "$PROJECT" --description="My TSI Spire Agent project on OpenShift" > /dev/null - oc project "$PROJECT" else echo "Keeping the existing $PROJECT project as is" fi fi +oc project "$PROJECT" + # Need to copy the spire-bundle from the server namespace oc -n "$PROJECT" get cm spire-bundle if [ "$?" == "0" ]; then - echo "WARNING: using the existing configmap spire-bundle in $PROJECT. " + echo "Using the existing configmap spire-bundle in $PROJECT. " else echo "ConfigMap 'spire-bundle' must be created" exit 1 @@ -178,7 +179,7 @@ oc_cli adm policy add-scc-to-user spire-agent "system:serviceaccount:$PROJECT:$S # this works: oc_cli adm policy add-scc-to-user privileged -z $SPIRE_AG_SA -helm install --set "spireAddress=$SPIRESERVER" --set "namespace=$PROJECT" \ +helm install --set "spireServer.address=$SPIRESERVER" --set "namespace=$PROJECT" \ --set "clustername=$CLUSTERNAME" --set "trustdomain=$TRUSTDOMAIN" \ --set "region=$REGION" \ --set "openShift=true" spire charts/spire # --debug diff --git a/utils/install-open-shift-tornjak.sh b/utils/install-open-shift-tornjak.sh index dc0b8382..a99d18cf 100755 --- a/utils/install-open-shift-tornjak.sh +++ b/utils/install-open-shift-tornjak.sh @@ -23,7 +23,7 @@ Syntax: ${0} -c -t -p --oidc Where: -c - name of the OpenShift cluster (required) -t - the trust root of SPIFFE identity provider, default: spiretest.com (optional) - -p - OpenShift project [namespace] to install the Server, default: spire-server (optional) + -p - OpenShift project [namespace] to install the Server, default: tornjak (optional) --oidc - execute OIDC installation (optional) --clean - performs removal of project (allows additional parameters i.e. -p|--project). 
HELPMEHELPME @@ -36,12 +36,13 @@ cleanup() { oc delete ClusterRole spire-server-role 2>/dev/null oc delete ClusterRoleBinding spire-server-binding 2>/dev/null + oc delete statefulset.apps/spire-server 2>/dev/null oc delete scc "$SPIRE_SCC" 2>/dev/null oc delete sa "$SPIRE_SA" 2>/dev/null - oc delete route spire-server 2>/dev/null - oc delete route tornjak-http 2>/dev/null - oc delete route tornjak-mtls 2>/dev/null - oc delete route tornjak-tls 2>/dev/null + oc delete secret spire-secret tornjak-certs 2>/dev/null + oc delete cm spire-bundle spire-server oidc-discovery-provider 2>/dev/null + oc delete service spire-server spire-oidc tornjak-http tornjak-mtls tornjak-tls 2>/dev/null + oc delete route spire-server tornjak-http tornjak-mtls tornjak-tls oidc 2>/dev/null oc delete ingress spireingress 2>/dev/null #oc delete group $GROUPNAME --ignore-not-found=true #oc delete project "$PROJECT" 2>/dev/null @@ -96,7 +97,7 @@ fi # function for executing oc cli calls oc_cli() { -oc "$@" +oc -n "$PROJECT" "$@" if [ "$?" != "0" ]; then echo "Error executing: oc" "$@" exit 1 @@ -108,11 +109,12 @@ installSpireServer(){ oc get projects | grep $PROJECT if [ "$?" != "0" ]; then echo "Project $PROJECT must be created first" - echo "oc new-project $PROJECT --description=\"My TSI Spire SERVER project on OpenShift\" 2> /dev/null" + echo "oc new-project $PROJECT --description=\"My TSI Spire SERVER project on OpenShift\" " exit 1 fi - oc -n $PROJECT get statefulset spire-server + # test if Tornjak already exists: + oc -n $PROJECT get statefulset spire-server 2>/dev/null if [ "$?" == "0" ]; then # check if spire-server project exists: echo "$PROJECT project already exists. 
" @@ -124,14 +126,15 @@ installSpireServer(){ cleanup # while (oc get projects | grep "$PROJECT"); do echo "Waiting for "$PROJECT" removal to complete"; sleep 2; done # oc new-project "$PROJECT" --description="My TSI Spire SERVER project on OpenShift" 2> /dev/null - oc project "$PROJECT" 2> /dev/null else echo "Keeping the existing $PROJECT project as is" echo 0 fi - fi +# switch to `tornjak` namespace: +oc project "$PROJECT" 2> /dev/null + # get ingress information: INGSEC=$(ibmcloud oc cluster get --cluster "$CLUSTERNAME" --output json | jq -r '.ingressSecretName') if [ -z "${INGSEC}" ]; then @@ -192,24 +195,24 @@ if ! $OIDC ; then --set "clustername=$CLUSTERNAME" \ --set "trustdomain=$TRUSTDOMAIN" \ --set "openShift=true" \ - tornjak charts/tornjak # --debug + tornjak charts/tornjak #--debug else ING=$(ibmcloud oc cluster get --cluster "$CLUSTERNAME" --output json | jq -r '.ingressHostname') helm install --set "namespace=$PROJECT" \ --set "clustername=$CLUSTERNAME" \ --set "trustdomain=$TRUSTDOMAIN" \ - --set "OIDC.enable=true" \ - --set "OIDC.MY_DISCOVERY_DOMAIN=$ING" \ + --set "oidc.enable=true" \ + --set "oidc.myDiscoveryDomain=$ING" \ --set "openShift=true" \ - tornjak charts/tornjak # --debug + tornjak charts/tornjak #--debug fi helm list # oc -n $PROJECT expose svc/$SPIRESERVER # Ingress route for spire-server -oc_cli -n "$PROJECT" create route passthrough --service spire-server -oc_cli -n "$PROJECT" get route +oc_cli create route passthrough --service spire-server +oc_cli get route INGRESS=$(oc -n "$PROJECT" get route spire-server -o jsonpath='{.spec.host}{"\n"}') echo "$INGRESS" @@ -242,37 +245,37 @@ spec: EOF # create route for Tornjak TLS: -oc_cli -n "$PROJECT" create route passthrough tornjak-tls --service tornjak-tls +oc_cli create route passthrough tornjak-tls --service tornjak-tls # create route for Tornjak mTLS: -oc_cli -n "$PROJECT" create route passthrough tornjak-mtls --service tornjak-mtls +oc_cli create route passthrough tornjak-mtls 
--service tornjak-mtls # create route for Tornjak HTTP: # oc create route passthrough tornjak-http --service tornjak-http -oc_cli -n "$PROJECT" expose svc/tornjak-http +oc_cli expose svc/tornjak-http if $OIDC ; then # open edge access for oidc oc -n $PROJECT create route edge oidc --service spire-oidc fi -SPIRESERV=$(oc get route spire-server --output json | jq -r '.spec.host') +SPIRESERV=$(oc -n "$PROJECT" get route spire-server --output json | jq -r '.spec.host') echo # "https://$SPIRESERV" echo "export SPIRE_SERVER=$SPIRESERV" echo # empty line to separate visually -TORNJAKHTTP=$(oc get route tornjak-http --output json | jq -r '.spec.host') -echo "Tornjak (http): http://$TORNJAKHTTP/" -TORNJAKTLS=$(oc get route tornjak-tls --output json | jq -r '.spec.host') -echo "Tornjak (TLS): https://$TORNJAKTLS/" -TORNJAKMTLS=$(oc get route tornjak-mtls --output json | jq -r '.spec.host') -echo "Tornjak (mTLS): https://$TORNJAKMTLS/" +TORNJAKHTTP=$(oc -n "$PROJECT" get route tornjak-http --output json | jq -r '.spec.host') +echo "Tornjak (http): http://$TORNJAKHTTP" +TORNJAKTLS=$(oc -n "$PROJECT" get route tornjak-tls --output json | jq -r '.spec.host') +echo "Tornjak (TLS): https://$TORNJAKTLS" +TORNJAKMTLS=$(oc -n "$PROJECT" get route tornjak-mtls --output json | jq -r '.spec.host') +echo "Tornjak (mTLS): https://$TORNJAKMTLS" echo # empty line to separate visually echo "Trust Domain: $TRUSTDOMAIN" if $OIDC ; then - OIDCURL=$(oc get route oidc --output json | jq -r '.spec.host') - echo "Tornjak (oidc): " - echo " https://$OIDCURL/" - echo "For testing oidc: " + OIDCURL=$(oc -n "$PROJECT" get route oidc --output json | jq -r '.spec.host') + echo "Tornjak with OIDC: " + echo " export OIDC_URL=https://$OIDCURL" + echo "For testing OIDC: " echo " curl -k https://$OIDCURL/.well-known/openid-configuration" echo " curl -k https://$OIDCURL/keys" fi