From e3de469911e276c4f2eba628d472a2209d9973d9 Mon Sep 17 00:00:00 2001 From: vinayakgadag Date: Thu, 5 May 2022 20:56:10 -0400 Subject: [PATCH] #30-milestone4: Rancher, K8S, and Custos setup --- deployment_scripts/README.md | 153 +--- deployment_scripts/concourse_cd/README.md | 155 ++++ .../concourse_cd/k8s-secrets.yml | 11 + .../{ => concourse_cd}/namespace.yaml | 0 deployment_scripts/concourse_cd/pv.yaml | 45 + deployment_scripts/concourse_cd/pv1.yaml | 23 + deployment_scripts/custos/README.md | 11 + .../custos/cert-manager/README.md | 7 + .../custos/cert-manager/issuer.yaml | 19 + deployment_scripts/custos/consul/README.md | 19 + deployment_scripts/custos/consul/config.yaml | 3 + deployment_scripts/custos/consul/pv.yaml | 18 + deployment_scripts/custos/consul/pv1.yaml | 18 + deployment_scripts/custos/consul/pv2.yaml | 18 + .../custos/consul/storage_class.yaml | 6 + deployment_scripts/custos/keycloak/README.md | 16 + .../custos/keycloak/custos-keycloak.yaml | 12 + .../custos/keycloak/ingress.yaml | 22 + .../custos/keycloak/keycloak-db-secret.yaml | 13 + .../custos/keycloak/operator.yaml | 33 + .../postgresql/postgresql-values.yaml | 829 +++++++++++++++++ .../custos/keycloak/postgresql/pv.yaml | 14 + .../custos/keycloak/postgresql/pv1.yaml | 14 + .../custos/keycloak/postgresql/pv2.yaml | 14 + deployment_scripts/custos/mysql/README.md | 8 + deployment_scripts/custos/mysql/pv.yaml | 14 + deployment_scripts/custos/mysql/pv1.yaml | 14 + deployment_scripts/custos/mysql/values.yaml | 841 ++++++++++++++++++ deployment_scripts/custos/vault/README.md | 10 + deployment_scripts/custos/vault/ingress.yaml | 22 + deployment_scripts/custos/vault/values.yaml | 731 +++++++++++++++ deployment_scripts/metric_server.yaml | 0 32 files changed, 2970 insertions(+), 143 deletions(-) create mode 100644 deployment_scripts/concourse_cd/README.md create mode 100644 deployment_scripts/concourse_cd/k8s-secrets.yml rename deployment_scripts/{ => concourse_cd}/namespace.yaml (100%) create mode 100644 deployment_scripts/concourse_cd/pv.yaml create mode 100644 deployment_scripts/concourse_cd/pv1.yaml create mode 100644 deployment_scripts/custos/README.md create mode 100644 deployment_scripts/custos/cert-manager/README.md create mode 100644 deployment_scripts/custos/cert-manager/issuer.yaml create mode 100644 deployment_scripts/custos/consul/README.md create mode 100644 deployment_scripts/custos/consul/config.yaml create mode 100644 deployment_scripts/custos/consul/pv.yaml create mode 100644 deployment_scripts/custos/consul/pv1.yaml create mode 100644 deployment_scripts/custos/consul/pv2.yaml create mode 100644 deployment_scripts/custos/consul/storage_class.yaml create mode 100644 deployment_scripts/custos/keycloak/README.md create mode 100644 deployment_scripts/custos/keycloak/custos-keycloak.yaml create mode 100644 deployment_scripts/custos/keycloak/ingress.yaml create mode 100644 deployment_scripts/custos/keycloak/keycloak-db-secret.yaml create mode 100644 deployment_scripts/custos/keycloak/operator.yaml create mode 100644 deployment_scripts/custos/keycloak/postgresql/postgresql-values.yaml create mode 100644 deployment_scripts/custos/keycloak/postgresql/pv.yaml create mode 100644 deployment_scripts/custos/keycloak/postgresql/pv1.yaml create mode 100644 deployment_scripts/custos/keycloak/postgresql/pv2.yaml create mode 100644 deployment_scripts/custos/mysql/README.md create mode 100644 deployment_scripts/custos/mysql/pv.yaml create mode 100644 deployment_scripts/custos/mysql/pv1.yaml create mode 100644 
deployment_scripts/custos/mysql/values.yaml
 create mode 100644 deployment_scripts/custos/vault/README.md
 create mode 100644 deployment_scripts/custos/vault/ingress.yaml
 create mode 100644 deployment_scripts/custos/vault/values.yaml
 delete mode 100644 deployment_scripts/metric_server.yaml

diff --git a/deployment_scripts/README.md b/deployment_scripts/README.md
index 941ae12..37d02ff 100644
--- a/deployment_scripts/README.md
+++ b/deployment_scripts/README.md
@@ -1,146 +1,13 @@
-## Goal: Continuous deployment using Concourse CI/CD tool
+### Distributed system configurations and installation
-TL;DR
+This contains two components:
-### Pipelines
+- CI/CD using Concourse
+  - Builds and deploys to the Kubernetes cluster
+  - The cluster runs on a JS2 instance
+  - The cluster is configured following the JS2 OpenStack [documentation](https://docs.jetstream-cloud.org/general/k8scluster/)
-
-```api-gateway``` | ```data-source``` | ```user-management``` | ```merra-data-source```
-
-- Pipeline for each service resides under directory ``deployment_scripts/concourse_cd``
-
-#### Concourse Installation
-
-- We are using concourse as a service
-- Concourse is deployed in K8S using helm chart
-- ``$ helm repo add concourse https://concourse-charts.storage.googleapis.com/``
-- ``$ helm install my-release concourse/concourse``
-- Create K8S cluster secrets for concourse to connect to K8S cluster for deployment
-
-
-#### Concourse definitions
-
-- Define resource types and resources
-
-  : Resource Type -
-  - Kubernetes docker resource to deploy the applications to k8s cluster
-  - We used resource type {kudohn/concourse-k8s-resource}
-  ```yaml
-  resource_types:
-  - name: k8s
-    type: docker-image
-    source:
-      repository: kudohn/concourse-k8s-resource
-  ```
-
-
-  : Resources -
-  - We need explicitly define the resource types, which are declared before we can use them.
so we are going to tell concourse that we want to use kubernetes external resource ``{kudohn/concourse-k8s-resource}`` - - we are using ```{git}``` resource supported by concourse which will pull the code from code repositories and we are using gitHub in our case - - We are using ```{registry-image}``` resource to build and publish our docker image to docker-hub - - We are using ``` {k8s} ``` reource, as defined above in the resource type section to connect to k8s cluster and deploy applications - ```yaml - resources: - - name: git-ds - type: git - icon: git - source: - uri: https://github.com/airavata-courses/DCoders.git - branch: feature/30-implement-cd - - # Where we will push the image - - name: publish-image - type: registry-image - icon: docker - source: - repository: vinayakasgadag/decoders-datasource - tag: milestone-3 - username: vinayakasgadag - password: ((k8s-secret.docker-hub-pwd)) - - - name: k8s - type: k8s - icon: kubernetes - source: - api_server_url: ((k8s-secret.k8s-cluster-url)) - api_server_cert: ((k8s-secret.api_server_cert)) - client_cert: ((k8s-secret.client_cert)) - client_key: ((k8s-secret.client_key)) - skip_tls_verify: false - ``` - - : Jobs - - - Jobs are resposible for creating the required tasks (defined in job definition) to run the pipeline - - Jobs have stages(plan) and tasks - - Stage or plan: it can be defined to be dependent on previous stage and run only if it is successful - - Task: Useful when we want to run some scripts to achieve somehting before the job can be run - ```yaml - jobs: - - name: "build-publish" - public: true - serial: true - plan: - - get: git-ds - trigger: true - - task: build-image-task - privileged: true - config: - platform: linux - image_resource: - type: registry-image - source: - repository: vito/oci-build-task - inputs: - - name: git-ds - outputs: - - name: image - params: - CONTEXT: git-ds/datasource_service - UNPACK_ROOTFS: true - run: - path: build - - put: publish-image - params: - image: image/image.tar - - - name: "deploy-application" - plan: - - get: git-ds - passed: ["build-publish"] - - task: create-deploy-scripts - config: - platform: linux - image_resource: - type: docker-image - source: {repository: busybox} - inputs: - - name: git-ds - outputs: - - name: deploy-files - run: - path: git-ds/deployment_scripts/copy_files.sh - - - put: k8s - params: - status_check_timeout: 60 - command_timeout: 30 - paths: - - deploy-files/datasource_service/deployment.yaml - - deploy-files/datasource_service/service.yaml - - deploy-files/datasource_service/autoscaler.yaml - watch_resources: - - name: datasource-deployment - kind: Deployment - ``` - - -##### Example view of concurse dashboard -Screen Shot 2022-04-21 at 3 24 38 PM - - -##### Why Concourse? 
-  - The beauty of the concourse is the declarative appraoch and the flexibility to automate the builds the way we want to using the resources, jobs and task concpets
-  - We don't need ansible or any other tool to automate the deployments since concourse supports it through the resources
-  - Concourse can be run as individual standalone cluster or as a distributed system as service inside the kubernetes cluster, which will scale horizontally as the number of pipeline or the job increses, and this is a very big advantage comparing to Jenkins or any other tools
-
-
-##### For more details on [concourse](https://concourse-ci.org/docs.html)
+- Custos set up
+  - Set up Rancher
+  - Create a K8S cluster
+  - Follow the [custos](custos) guide for more details on installation
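The Custos bullets above assume a Rancher instance is already available for creating the K8S cluster. A minimal sketch of the documented single-node Rancher install follows; the image tag and port mappings are assumptions, not taken from this patch:

```sh
# Run Rancher as a single Docker container on the management VM
# (per the Rancher docs; pin a concrete version tag in real use).
sudo docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:v2.6.3
```

Once Rancher is up, the cluster itself is created from the Rancher UI, and `kubectl` access comes from the kubeconfig Rancher generates for that cluster.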
diff --git a/deployment_scripts/concourse_cd/README.md b/deployment_scripts/concourse_cd/README.md
new file mode 100644
index 0000000..f61209b
--- /dev/null
+++ b/deployment_scripts/concourse_cd/README.md
@@ -0,0 +1,155 @@
+## Goal: Continuous deployment using Concourse CI/CD tool
+
+TL;DR
+
+### Pipelines
+
+```api-gateway``` | ```data-source``` | ```user-management``` | ```merra-data-source```
+
+- Pipeline for each service resides under the directory ``deployment_scripts/concourse_cd``
+
+#### Concourse Installation
+
+- We are using Concourse as a service
+- Concourse is deployed in K8S using its helm chart
+- ``$ helm repo add concourse https://concourse-charts.storage.googleapis.com/``
+- Create a PV for storage
+  ``$ kubectl apply -f pv.yaml``
+- ``` kubernetes helm
+  helm install concourse concourse/concourse \
+  --set persistence.worker.size=2Gi \
+  --set persistence.worker.storageClass=manual \
+  --set postgresql.persistence.size=2Gi \
+  --set postgresql.persistence.storageClass=manual \
+  --set web.service.type=NodePort
+  ```
+- Create K8S cluster secrets for Concourse to connect to the K8S cluster for deployment
+
+
+#### Concourse definitions
+
+- Define resource types and resources
+
+  : Resource Type -
+  - A Kubernetes docker resource to deploy the applications to the k8s cluster
+  - We used the resource type {kudohn/concourse-k8s-resource}
+  ```yaml
+  resource_types:
+  - name: k8s
+    type: docker-image
+    source:
+      repository: kudohn/concourse-k8s-resource
+  ```
+
+
+  : Resources -
+  - We need to explicitly define the resource types, which are declared before we can use them, so we are going to tell Concourse that we want to use the external Kubernetes resource ``{kudohn/concourse-k8s-resource}``
+  - We are using the ```{git}``` resource supported by Concourse, which pulls the code from the code repositories (GitHub in our case)
+  - We are using the ```{registry-image}``` resource to build and publish our docker image to docker-hub
+  - We are using the ``` {k8s} ``` resource, as defined above in the resource type section, to connect to the k8s cluster and deploy applications
+  ```yaml
+  resources:
+  - name: git-ds
+    type: git
+    icon: git
+    source:
+      uri: https://github.com/airavata-courses/DCoders.git
+      branch: feature/30-implement-cd
+
+  # Where we will push the image
+  - name: publish-image
+    type: registry-image
+    icon: docker
+    source:
+      repository: vinayakasgadag/decoders-datasource
+      tag: milestone-3
+      username: vinayakasgadag
+      password: ((k8s-secret.docker-hub-pwd))
+
+  - name: k8s
+    type: k8s
+    icon: kubernetes
+    source:
+      api_server_url: ((k8s-secret.k8s-cluster-url))
+      api_server_cert: ((k8s-secret.api_server_cert))
+      client_cert: ((k8s-secret.client_cert))
+      client_key: ((k8s-secret.client_key))
+      skip_tls_verify: false
+  ```
+
+  : Jobs -
+  - Jobs are responsible for creating the required tasks (defined in the job definition) to run the pipeline
+  - Jobs have stages (plans) and tasks
+    - Stage or plan: it can be declared to depend on a previous stage and run only if that stage succeeds
+    - Task: useful when we want to run some scripts to achieve something before the job can run
+  ```yaml
+  jobs:
+  - name: "build-publish"
+    public: true
+    serial: true
+    plan:
+    - get: git-ds
+      trigger: true
+    - task: build-image-task
+      privileged: true
+      config:
+        platform: linux
+        image_resource:
+          type: registry-image
+          source:
+            repository: vito/oci-build-task
+        inputs:
+        - name: git-ds
+        outputs:
+        - name: image
+        params:
+          CONTEXT: git-ds/datasource_service
+          UNPACK_ROOTFS: true
+        run:
+          path: build
+    - put: publish-image
+      params:
+        image: image/image.tar
+
+  - name: "deploy-application"
+    plan:
+    - get: git-ds
+      passed: ["build-publish"]
+    - task: create-deploy-scripts
+      config:
+        platform: linux
+        image_resource:
+          type: docker-image
+          source: {repository: busybox}
+        inputs:
+        - name: git-ds
+        outputs:
+        - name: deploy-files
+        run:
+          path: git-ds/deployment_scripts/copy_files.sh
+
+    - put: k8s
+      params:
+        status_check_timeout: 60
+        command_timeout: 30
+        paths:
+        - deploy-files/datasource_service/deployment.yaml
+        - deploy-files/datasource_service/service.yaml
+        - deploy-files/datasource_service/autoscaler.yaml
+        watch_resources:
+        - name: datasource-deployment
+          kind: Deployment
+  ```
+
+
+##### Example view of the Concourse dashboard
+Screen Shot 2022-04-21 at 3 24 38 PM
+
+
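The README defines the pipelines but never shows how they are pushed to Concourse; presumably that happens with the `fly` CLI along these lines (the target alias, web URL, and pipeline file name are assumptions):

```sh
# Log in to the Concourse web node once, saving a target alias.
fly -t dcoders login -c http://<concourse-web-url>

# Upload (or update) the data-source pipeline, then unpause it.
fly -t dcoders set-pipeline -p data-source -c data-source-pipeline.yaml
fly -t dcoders unpause-pipeline -p data-source
```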
+##### Why Concourse?
+  - The beauty of Concourse is the declarative approach and the flexibility to automate the builds the way we want, using the resources, jobs, and tasks concepts
+  - We don't need Ansible or any other tool to automate the deployments, since Concourse supports this through its resources
+  - Concourse can run as an individual standalone cluster or as a distributed service inside the Kubernetes cluster, which scales horizontally as the number of pipelines or jobs increases; this is a big advantage compared to Jenkins or other tools
+
+
+##### For more details, see the [concourse documentation](https://concourse-ci.org/docs.html)
diff --git a/deployment_scripts/concourse_cd/k8s-secrets.yml b/deployment_scripts/concourse_cd/k8s-secrets.yml
new file mode 100644
index 0000000..a7d86ba
--- /dev/null
+++ b/deployment_scripts/concourse_cd/k8s-secrets.yml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: k8s-secret
+data:
+  api_server_cert: |
+    LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzRENDQXNTZ0F3SUJBZ0lCQWpBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJeU1EVXdNakE0TkRVeE4xb1hEVEkxTURVd01qQTRORFV4TjFvd0xERVhNQlVHQTFVRQpDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhFVEFQQmdOVkJBTVRDRzFwYm1scmRXSmxNSUlCSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF6S0RsdS84MGZMZXA0aGtqYWkzS1pSZ3hRbEVFWGR5LzdVMHkKMUJpQkpmTGNjRC9LdHJSZFN5QkhMdnNVNDI5VHFMVEVqVEpIR3hyZEhySGJ4OWpIamUvU2VPSWhqZnJvcThBKwpPM1ExbXNtSkxBemM2UGlTMWVicFFLWEVvNlJEUGJ1S3Era2NPZnJrWjQxVmlKcjdWODZ6aGNISVRoK2xVQmZJCjdOZ0xUMW4zaUd4czhPRmlTcDQzbzEyM1JFWDhIMnhiZkE1SEtmdlNMS01CcWwxQ3pWbnJodXRhWWU0VWIxeDkKdTRibHRLb28zN1hkMlVNakNCOWUvb2wxN2YzSjFQZmRvNDNBd1RoTXMvTkZ5Z2M4bU8zck5iaG5ZU1d1QVZKaQplZUl0bVlrOFI4MmtOdmpheVp2OGR6N2JPSW45RDZOcndIekpIR2kwUFlyRmFNbVhtd0lEQVFBQm80SUJIakNDCkFSb3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0QKQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkI2VjJNVm1hNG9MYlFrNHNMRk45Qmxvdkh0dQpNSUc1QmdOVkhSRUVnYkV3Z2E2Q0NtMXBibWxyZFdKbFEwR0NIMk52Ym5SeWIyd3RjR3hoYm1VdWJXbHVhV3QxClltVXVhVzUwWlhKdVlXeUNKR3QxWW1WeWJtVjBaWE11WkdWbVlYVnNkQzV6ZG1NdVkyeDFjM1JsY2k1c2IyTmgKYklJV2EzVmlaWEp1WlhSbGN5NWtaV1poZFd4MExuTjJZNElTYTNWaVpYSnVaWFJsY3k1a1pXWmhkV3gwZ2dwcgpkV0psY201bGRHVnpnZ2xzYjJOaGJHaHZjM1NIQk1Db01RS0hCQXBnQUFHSEJIOEFBQUdIQkFvQUFBRXdEUVlKCktvWklodmNOQVFFTEJRQURnZ0VCQUxaMXIzbW5WZndiR21nS2lzSzhDekRFTDZjMFhIb2ZaenNia1c5MXpFb08KMjdlRGVMSVA2ZStLeXJndDJscllVcTAyeEgwQk11Y2NWUDR5cEJyRmZXdWgybE9rdWNtZEl2amE5SExOcEs0bwpmcHU5bjJPVHpHdVRsVnRrWUNLcEFXMURxSnlhblV4NC92MjYwWkV4OFdXbWp6aEtQbnlRakkrVnBmSzZva3pVCnQxSldZcVB1d3NtOEg3b0pOZTFUZU1rRkdJZ2haV3A4cngzTXZHK0V2WEtSTkE5OStHN3V2bEtMQ2E3eWYrMS8KSjRzcngwNzNxSHBpMTZ4RGFZcEsydFh6ek4xdVBWaGhla0YxMHV2dzIxVStGdEJIQWhoWXNZN3BVRWZ0TWpEbgpUbmJLaUFjNEM0NUsvRXpQQ0VDL2FFenBkZHI5ZWZiSkJYMTVOOHJrYkJvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t
+  client_key: |
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcXAzM3V5ZHRhSDZLcUpEUy8wUUJtbjc1VVBhMncxa2EzTW15WWlpQnhCZStnMFloCkNGcC9RWHpXNjRyM2o1aFBFait5NzFSUXNqRzdMakVHaDhmNzBWSE4rSTJrRDlWbndlOFUyZnNzVzh0anQ4SksKV1NQa01SYTlkVlV0WlhoVGRtSFkyZzlqNzdrLzB0L1BvUkJtbHBIMjB2ZjVUQ1BqbDFrRllyYmRpck1uMWNuRApna0YzMklWUWFDMmtHVXlzL25SVVNnZ2FzMUUxYmo4TElha3Q5Z1Zta3JEdjZvaVFNV3QxVVRhSWMwbFV5NzJwCmdFNjhBTFlwOS9ZYmE3RFdON2I2ZEpSVWxiWkdCR1RmZ1VPVkcybXdUYjl4aTgzckIwQVBFeGlBbW00bVJiNEcKem5tTGxCSFB4bjYvWGRUbXhSdFR1NUFpSVplTGxLYUJjRWIyeXdJREFRQUJBb0lCQUY3NU9ncHJJeUwwdGJpcApqVnBjLzh1QmZNVU40S1NUT0RuSTZNeHRJZmNIQkp6TWI5elhpMWpuNWpjTmowcldqTVZxd2U4cGJ4WVNTdENtCnA2enpySUJUV2lWT3F4SEpTRTJUQ1hkaHNzcVNTRHJsSXovRmsrT2pkZWtYZGdLeTNUcmJzcnVIcjZpazVSczgKVExhcWk0Q3JWY3NRRUN6TEdZaUpIRTliM2F5enJ0bWk2U0Z6b1ZadE5FZ0RBTlUzaFdWS3F0ekdtZDdoT245dgpwNS9JaHVsRjdodFRFaVdVbjgyNFdrRHhyQ2NMQkRDMXBhTmwydUVsMENwdjcvUW5Za3M3THNFeTFGaWVoMkNDCnN3Tnd5REp1N211dGpMVGN2T013Q2ppSlJjZHFxN0tGN3hycmt0MTMvNCtzdngrbVVpQU5hVGRFQzd3WVNJR3cKdVpNZVpOa0NnWUVBeWE1YkJTUEo0UGxMTW9FdTNJYWpxeS9hcUI5U0lDOE5EQlBJWkNQTU5FRHRIY0E2dEhFQQpYWmtBMGJwQkhiNllPK2V6SkdlWHVQYkJoMW5rSlpSSEVRUHY4QkJ6VGZwbkh3VW9pRzFQdUhyZ2JXTG9RTUtKClJPN2ZDNFppd2p2YmFGV2pIUHVFekIyNzI5aTNIcWFMWlVNWXZBVHplMTZ2c21PNkozWnhueDhDZ1lFQTJKSEsKREV0Tjc1SW0wall2SGI2U2dWQkptbnl0WW9Lc01PMXRRejV6SitrNlZrWElwQXJ3cnUwb01TQ3BJV2xkeVJ6SQoxNUkzaE1oZDJCdmx4cjJINHJvWnJWRTc5RjlZeDdqZlI0VDlVUlhtaGVUSTZpSHpTTXpSRHByZUU4Qnc2UllVCkg0a3ZWSXVyWWNKN0J4NE5tYS9nK0hwVXZwNk9QeU91RExBYUx0VUNnWUJuRVNlSzFPTlNpWlFZVjFSdmRvOGwKNk9yQmlHQWIrbStjZ0cra1hYYjZMVVFBTkVETC9nUEYwVzlOdnZXUUVkc1NvakkycElveENFbVd0aVdWM3RVQwpxUlJ4aHJhbVh4VmNFUExKNWJNY0FBKzVWeGFDSWVpc3hiWk8yWHFXOEtnTUJkZTU4Ly9Gb0Z4azJiZWJmbGsyCmdyZWRQcHAvcmIvMFZtckh5QXdBMlFLQmdRQzN4bmM3R1lmb0hSQ2VYMlo2Q2lhT1gxQW1MVmlBZUx5ZnhFcHMKdm9pL3ZIVkprbXdoY0RzdlpZWXVzalZ6YWRNdy95RWJkVE54bFFtMWdtN295QnFRZGpXbDBvSmE2N0lOd1Q2UApsVFhVNGcyOVh4aHpQaDRSaitSelRVM1lXdncxZnd2U2V2cFQ5eldXZm84aHlnbm1lYzRoYk1XUEFFTmJKdTdpClMybmNoUUtCZ0JHNlNVR1pSZmI1d1NUVTJFaGkybjU3TW12MGh6ZGxTdktFLzJrUUp5ZkVOQUZTU1p5bVgwMm8KRlQ0Q2hraFBBb1BXd3RLSGVLZHF2ZEJIQ3RZbEJVWDZSZk1GelJmc3c0OEdmMTVJQVU2MmgyS3MxaUdwN0Q1eQpJdFFPRUVEMk1CdENVZDYwblNsWmo4OTFvMFpmVXFrQjNvckZTRE9JbktnQllBNWxBOER2Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0t + client_cert: | + 
LS0tLUJFR0lOQ0VSVElGSUNBVEUtLS0tLQpNSUlESVRDQ0FnbWdBd0lCQWdJQkFqQU5CZ2txaGtpRzl3MEJBUXMKRkFEQVZNUk13RVFZRFZRUURFd3B0YVc1cGEzVmlaVU5CTUI0WERUSXlNRFV3TWpBNE5EVXhObG9YRFRJMU1EVQp3TWpBNE5EVXhObG93TVRFWE1CVUdBMVVFQ2hNT2MzbHpkR1Z0T20xaGMzUmxjbk14RmpBVUJnTlZCQU1URFcxCnBibWxyZFdKbExYVnpaWEl3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQ3FuZmUKN0oyMW9mb3Fva05ML1JBR2FmdmxROXJiRFdScmN5YkppS0lIRUY3NkRSaUVJV245QmZOYnJpdmVQbUU4U1A3TAp2VkZDeU1ic3VNUWFIeC92UlVjMzRqYVFQMVdmQjd4VForeXhieTJPM3drcFpJK1F4RnIxMVZTMWxlRk4yWWRqCmFEMlB2dVQvUzM4K2hFR2FXa2ZiUzkvbE1JK09YV1FWaXR0MktzeWZWeWNPQ1FYZlloVkJvTGFRWlRLeitkRlIKS0NCcXpVVFZ1UHdzaHFTMzJCV2FTc08vcWlKQXhhM1ZSTm9oelNWVEx2YW1BVHJ3QXRpbjM5aHRyc05ZM3R2cAowbEZTVnRrWUVaTitCUTVVYmFiQk52M0dMemVzSFFBOFRHSUNhYmlaRnZnYk9lWXVVRWMvR2ZyOWQxT2JGRzFPCjdrQ0lobDR1VXBvRndSdmJMQWdNQkFBR2pZREJlTUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakEKVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCUQplbGRqRlptdUtDMjBKT0xDeFRmUVphTHg3YmpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWhQQUlRaHRwSkxPCjdISjcvSnAySEtBM3BZWmxodmUza1k4bUw1a1Y2VjRaOVpSSk9IbGRZUnZjaUxsSlg3c0tua3R5d3JuZ3kvNWkKY0JydElkQXhoNmtqYUR0L1RLTmU3Yy84emJqaVhNZUN4TGh5YllHbVJOb3F3U3p6SmdtaWZHeXVBMmo3VEpPYgpWMkNhR0FRNjhFcHNjWHZXOFROb0VkazdDWThaRjh6ZTg5TFZFMXhGY1IvUXZUNnNRVUlZSThtMHdOMUVmOTNKCkN0VmhYQWRhQ1VXbkNhYUxSQ2xVWkdtVGo4YjNGZzYzbTFMYjgzdW9UOXJjNWdYdkRBNmw4b3BqVmZDWjFFODEKbjR3dFJTMDBrdlZ0VHhEM3RPdGNYWkx5czlBdWpSRkFoYXE3a2ZEc1REK21XNUp1ZjQyNCs0V0RxT21ud0dYWgp2MFFkTGFndXlRQT09Ci0tLS0tRU5EQ0VSVElGSUNBVEUtLS0tLQ== diff --git a/deployment_scripts/namespace.yaml b/deployment_scripts/concourse_cd/namespace.yaml similarity index 100% rename from deployment_scripts/namespace.yaml rename to deployment_scripts/concourse_cd/namespace.yaml diff --git a/deployment_scripts/concourse_cd/pv.yaml b/deployment_scripts/concourse_cd/pv.yaml new file mode 100644 index 0000000..a1ef179 --- /dev/null +++ b/deployment_scripts/concourse_cd/pv.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: concourse-pv-1 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: concourse-pv-2 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/concourse-work-dir" +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: concourse-pv-3 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" diff --git a/deployment_scripts/concourse_cd/pv1.yaml b/deployment_scripts/concourse_cd/pv1.yaml new file mode 100644 index 0000000..1a91a2c --- /dev/null +++ b/deployment_scripts/concourse_cd/pv1.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: concourse-pv +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Delete + storageClassName: local-storage + local: + path: /mnt/disks/ssd1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - master + - worker1 diff --git a/deployment_scripts/custos/README.md b/deployment_scripts/custos/README.md new file mode 100644 index 0000000..7df487a --- /dev/null +++ b/deployment_scripts/custos/README.md @@ -0,0 +1,11 @@ +### Custos set up on Jetstream1 +Follow the 
below order to set up Custos on Kubernetes:
+- Set up Rancher
+- Create a K8S cluster
+
+#### Create and deploy the backing services below
+- [Cert-Manager](cert-manager)
+- [keycloak](keycloak)
+- [consul](consul)
+- [vault](vault)
+- [mysql](mysql)
\ No newline at end of file
diff --git a/deployment_scripts/custos/cert-manager/README.md b/deployment_scripts/custos/cert-manager/README.md
new file mode 100644
index 0000000..a20202b
--- /dev/null
+++ b/deployment_scripts/custos/cert-manager/README.md
@@ -0,0 +1,7 @@
+### Deploy Cert Manager
+- ```kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.yaml```
+
+  [cert-manager](https://cert-manager.io/docs/installation/kubectl/)
+
+- Create the Cluster Issuer
+  ```kubectl apply -f issuer.yaml```
diff --git a/deployment_scripts/custos/cert-manager/issuer.yaml b/deployment_scripts/custos/cert-manager/issuer.yaml
new file mode 100644
index 0000000..4952293
--- /dev/null
+++ b/deployment_scripts/custos/cert-manager/issuer.yaml
@@ -0,0 +1,19 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    # You must replace this email address with your own.
+    # Let's Encrypt will use this to contact you about expiring
+    # certificates, and issues related to your account.
+    email: vgadag@iu.edu
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      # Secret resource that will be used to store the account's private key.
+      name: acme-prod-private-key
+    # Add a single challenge solver, HTTP01 using nginx
+    solvers:
+    - http01:
+        ingress:
+          class: nginx
diff --git a/deployment_scripts/custos/consul/README.md b/deployment_scripts/custos/consul/README.md
new file mode 100644
index 0000000..4a2e21a
--- /dev/null
+++ b/deployment_scripts/custos/consul/README.md
@@ -0,0 +1,19 @@
+### Deploy and configure consul
+
+#### Add the hashicorp helm repo
+``` kubernetes helm
+  helm repo add hashicorp https://helm.releases.hashicorp.com
+  helm repo update
+```
+
+#### Create the directory ``/hashicorp/consul/data`` on all the nodes
+  - ``sudo mkdir -p /hashicorp/consul/data``
+  - change the permissions: ``sudo chmod 777 -R /hashicorp``
+
+#### Run the commands below
+``` kubernetes helm
+  kubectl apply -f pv.yaml,pv1.yaml
+  kubectl apply -f storage_class.yaml
+  kubectl create namespace vault
+  helm install consul hashicorp/consul --version 0.31.1 -n vault --values config.yaml
+```
diff --git a/deployment_scripts/custos/consul/config.yaml b/deployment_scripts/custos/consul/config.yaml
new file mode 100644
index 0000000..23213d4
--- /dev/null
+++ b/deployment_scripts/custos/consul/config.yaml
@@ -0,0 +1,3 @@
+server:
+  replicas: 2
+  storage: 4Gi
diff --git a/deployment_scripts/custos/consul/pv.yaml b/deployment_scripts/custos/consul/pv.yaml
new file mode 100644
index 0000000..b9814d5
--- /dev/null
+++ b/deployment_scripts/custos/consul/pv.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: data-consul-pv0
+  labels:
+    type: local
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 4Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Recycle
+  claimRef:
+    namespace: vault
+    name: data-vault-consul-consul-server-0
+  hostPath:
+    path: "/hashicorp/consul"
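A quick way to sanity-check the consul deployment after the commands above; the pod label and the `vault` namespace follow from the `helm install consul ... -n vault` release, and this is a sketch, not part of the patch:

```sh
# The consul server pods should reach Running, and their PVCs should
# bind to the local PVs defined in pv.yaml/pv1.yaml (shown nearby).
kubectl get pods -n vault -l app=consul
kubectl get pvc -n vault
kubectl get pv
```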
diff --git a/deployment_scripts/custos/consul/pv1.yaml b/deployment_scripts/custos/consul/pv1.yaml
new file mode 100644
index 0000000..5604a90
--- /dev/null
+++ b/deployment_scripts/custos/consul/pv1.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: data-consul-pv1
+  labels:
+    type: local
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 4Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Recycle
+  claimRef:
+    namespace: vault
+    name: data-vault-consul-consul-server-1
+  hostPath:
+    path: "/hashicorp/consul"
diff --git a/deployment_scripts/custos/consul/pv2.yaml b/deployment_scripts/custos/consul/pv2.yaml
new file mode 100644
index 0000000..1637a54
--- /dev/null
+++ b/deployment_scripts/custos/consul/pv2.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: data-consul-pv2
+  labels:
+    type: local
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 4Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Recycle
+  claimRef:
+    namespace: vault
+    name: data-vault-consul-consul-server-2
+  hostPath:
+    path: "/hashicorp/consul"
diff --git a/deployment_scripts/custos/consul/storage_class.yaml b/deployment_scripts/custos/consul/storage_class.yaml
new file mode 100644
index 0000000..e6ce6fb
--- /dev/null
+++ b/deployment_scripts/custos/consul/storage_class.yaml
@@ -0,0 +1,6 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
diff --git a/deployment_scripts/custos/keycloak/README.md b/deployment_scripts/custos/keycloak/README.md
new file mode 100644
index 0000000..d8fc6f0
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/README.md
@@ -0,0 +1,16 @@
+### Deploy Postgresql
+  - The first step in the keycloak deployment process is to install Postgresql
+
+#### Create PersistentVolumes
+  - ``` kubernetes helm
+    kubectl apply -f pv.yaml
+    kubectl apply -f pv1.yaml
+    kubectl apply -f pv2.yaml
+    ```
+  - Deploy Postgresql
+    ``` kubernetes helm
+    helm repo add bitnami https://charts.bitnami.com/bitnami
+    helm repo update
+    kubectl create namespace keycloak
+    helm install keycloak-db-postgresql bitnami/postgresql -f postgresql-values.yaml -n keycloak --version 10.12.3
+    ```
diff --git a/deployment_scripts/custos/keycloak/custos-keycloak.yaml b/deployment_scripts/custos/keycloak/custos-keycloak.yaml
new file mode 100644
index 0000000..9e1093b
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/custos-keycloak.yaml
@@ -0,0 +1,12 @@
+apiVersion: keycloak.org/v1alpha1
+kind: Keycloak
+metadata:
+  name: custos-keycloak
+  labels:
+    app: custos-keycloak
+spec:
+  externalDatabase:
+    enabled: True
+  instances: 1
+  externalAccess:
+    enabled: False
diff --git a/deployment_scripts/custos/keycloak/ingress.yaml b/deployment_scripts/custos/keycloak/ingress.yaml
new file mode 100644
index 0000000..dc16d37
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/ingress.yaml
@@ -0,0 +1,22 @@
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: keycloak-ingress
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /auth/$2
+    kubernetes.io/ingress.class: "nginx"
+    cert-manager.io/cluster-issuer: letsencrypt-prod
+spec:
+  tls:
+    - hosts:
+        - js-169-150.jetstream-cloud.org
+      secretName: tls-keycloak-secret
+
+  rules:
+    - host: js-169-150.jetstream-cloud.org
+      http:
+        paths:
+          - path: /auth(/|$)(.*)
+            backend:
+              serviceName: keycloak-discovery
+              servicePort: 8080
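The keycloak README above only covers the Postgresql dependency; the remaining manifests in this directory (the Keycloak CR and ingress above, the DB secret and operator below) presumably get applied along these lines — a sketch, assuming the operator's CRDs and the `keycloak-operator` ServiceAccount/RBAC from the upstream keycloak-operator repo are already installed, since they are not part of this patch:

```sh
# Apply in dependency order inside the keycloak namespace.
kubectl apply -f keycloak-db-secret.yaml -n keycloak
kubectl apply -f operator.yaml -n keycloak
kubectl apply -f custos-keycloak.yaml -n keycloak
kubectl apply -f ingress.yaml -n keycloak
```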
diff --git a/deployment_scripts/custos/keycloak/keycloak-db-secret.yaml b/deployment_scripts/custos/keycloak/keycloak-db-secret.yaml
new file mode 100644
index 0000000..1b4d974
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/keycloak-db-secret.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: keycloak-db-secret
+  namespace: keycloak
+stringData:
+  POSTGRES_DATABASE: postgresDB
+  POSTGRES_EXTERNAL_ADDRESS: keycloak-db-postgresql.keycloak.svc.cluster.local
+  POSTGRES_EXTERNAL_PORT: "5432"
+  POSTGRES_PASSWORD: postgres
+  POSTGRES_SUPERUSER: "true"
+  POSTGRES_USERNAME: postgres
+type: Opaque
diff --git a/deployment_scripts/custos/keycloak/operator.yaml b/deployment_scripts/custos/keycloak/operator.yaml
new file mode 100644
index 0000000..ee8428c
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/operator.yaml
@@ -0,0 +1,33 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: keycloak-operator
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: keycloak-operator
+  template:
+    metadata:
+      labels:
+        name: keycloak-operator
+    spec:
+      serviceAccountName: keycloak-operator
+      containers:
+        - name: keycloak-operator
+          # Replace this with the built image name
+          image: quay.io/keycloak/keycloak-operator:master
+          command:
+            - keycloak-operator
+          imagePullPolicy: Always
+          env:
+            - name: WATCH_NAMESPACE
+              value: "keycloak"
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: OPERATOR_NAME
+              value: "keycloak-operator"
+            - name: RELATED_IMAGE_KEYCLOAK
+              value: "apachecustos/custos-keycloak:1.0"
diff --git a/deployment_scripts/custos/keycloak/postgresql/postgresql-values.yaml b/deployment_scripts/custos/keycloak/postgresql/postgresql-values.yaml
new file mode 100644
index 0000000..4bd8719
--- /dev/null
+++ b/deployment_scripts/custos/keycloak/postgresql/postgresql-values.yaml
@@ -0,0 +1,829 @@
+# Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  postgresql: {}
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+#   - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 11.11.0-debian-10-r50
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## Set to true if you would like to see extra information on logs
+  ## It turns BASH and/or NAMI debugging in the image
+  ##
+  debug: false
+
+## String to partially override common.names.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override common.names.fullname template
+##
+# fullnameOverride:
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: true
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: "10"
+    ## Specify a imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: true + user: postgres + password: postgres + readReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'on' + ## From the number of `readReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > readReplicas + numSynchronousReplicas: 2 + ## Replication Cluster application name. Useful for defining multiple replication policies + ## + applicationName: postgres_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+postgresqlPostgresPassword: postgres + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlPassword: postgres + + + + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +postgresqlDatabase: postgresDB + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's primary server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +primaryAsStandBy: + enabled: false + # primaryHost: + # primaryPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+ +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## +postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: '' + +## PostgreSQL service configuration +## +service: + ## PosgresSQL service type + ## + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start primary and read(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + storageClass: manual + accessModes: + - ReadWriteOnce + size: 5Gi + annotations: {} + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + +## updateStrategy for PostgreSQL StatefulSet and its reads StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Primary parameters +## +primary: + ## PostgreSQL Primary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## PostgreSQL Primary pod anti-affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: soft + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "postgresql.role" + ## Node label values to match + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: + - primary + + ## Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL primary Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL primary Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for primary + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL read only replica parameters +## +readReplicas: + ## PostgreSQL read only pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## PostgreSQL read only pod anti-affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: soft + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "postgresql.role" + ## Node label values to match + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: + - slave + + ## Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL read replicas Volume mounts + ## + extraVolumeMounts: [] + + ## Additional PostgreSQL read replicas Volumes + ## + extraVolumes: [] + + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for read + ## + service: {} + # type: + # nodePort: + # clusterIP: + + ## Whether to enable PostgreSQL read replicas data Persistent + ## + persistence: + enabled: true + + # Override the resource configuration for read replicas + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 100Mi + cpu: 100m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. 
+ ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## Configure extra options for startup, liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 10 + successThreshold: 1 + +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Startup probe +## +customStartupProbe: {} + +## Custom Liveness probe +## +customLivenessProbe: {} + +## Custom Rediness probe +## +customReadinessProbe: {} + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. + preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "common.names.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.9.0-debian-10-r23 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: [] + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] diff --git a/deployment_scripts/custos/keycloak/postgresql/pv.yaml b/deployment_scripts/custos/keycloak/postgresql/pv.yaml new file mode 100644 index 0000000..3db70b5 --- /dev/null +++ b/deployment_scripts/custos/keycloak/postgresql/pv.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv1 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/bitnami/postgresql" diff --git a/deployment_scripts/custos/keycloak/postgresql/pv1.yaml b/deployment_scripts/custos/keycloak/postgresql/pv1.yaml new file mode 100644 index 0000000..fd07043 --- /dev/null +++ b/deployment_scripts/custos/keycloak/postgresql/pv1.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv2 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/bitnami/postgresql" diff --git a/deployment_scripts/custos/keycloak/postgresql/pv2.yaml b/deployment_scripts/custos/keycloak/postgresql/pv2.yaml new file mode 100644 index 0000000..a90c48e --- /dev/null +++ b/deployment_scripts/custos/keycloak/postgresql/pv2.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv3 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/bitnami/postgresql" diff --git a/deployment_scripts/custos/mysql/README.md b/deployment_scripts/custos/mysql/README.md new file mode 100644 index 0000000..9454112 --- /dev/null +++ b/deployment_scripts/custos/mysql/README.md @@ -0,0 +1,8 @@ +### Deploy MySQL +- Create volumes +- ``kubectl apply -f pv.yaml,pv1.yaml`` +- Install MySQL using 
helm
+  ``` kubernetes helm
+  kubectl create namespace custos
+  helm install mysql bitnami/mysql -f values.yaml -n custos --version 8.8.8
+  ```
diff --git a/deployment_scripts/custos/mysql/pv.yaml b/deployment_scripts/custos/mysql/pv.yaml
new file mode 100644
index 0000000..b78fcbe
--- /dev/null
+++ b/deployment_scripts/custos/mysql/pv.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: mysql1
+  labels:
+    type: local
+spec:
+  storageClassName: ""
+  capacity:
+    storage: 2Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/bitnami/mysql"
diff --git a/deployment_scripts/custos/mysql/pv1.yaml b/deployment_scripts/custos/mysql/pv1.yaml
new file mode 100644
index 0000000..8f67668
--- /dev/null
+++ b/deployment_scripts/custos/mysql/pv1.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: mysql2
+  labels:
+    type: local
+spec:
+  storageClassName: ""
+  capacity:
+    storage: 2Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/bitnami/mysql"
diff --git a/deployment_scripts/custos/mysql/values.yaml b/deployment_scripts/custos/mysql/values.yaml
new file mode 100644
index 0000000..06afea1
--- /dev/null
+++ b/deployment_scripts/custos/mysql/values.yaml
@@ -0,0 +1,841 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami MySQL image
+## ref: https://hub.docker.com/r/bitnami/mysql/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/mysql
+  tag: 8.0.22-debian-10-r23
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ## It turns BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  debug: false
+
+## String to partially override common.names.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override common.names.fullname template
+##
+# fullnameOverride:
+
+## Cluster domain
+##
+clusterDomain: cluster.local
+
+## Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template
+##
+commonAnnotations: {}
+
+## Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template
+##
+commonLabels: {}
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## MySQL architecture.
Allowed values: standalone or replication +## +architecture: replication + +## MySQL Authentication parameters +## +auth: + ## MySQL root password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + rootPassword: JohnWickILoveMyDog + ## MySQL custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-on-first-run + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-user-on-first-run + ## + database: custos_metadata + username: root + password: JohnWickILoveMyDog + ## MySQL replication user and password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + replicationUser: custos_replicator + replicationPassword: JohnWickILoveMyDog + ## Existing secret with MySQL credentials + ## NOTE: When it's set the previous parameters are ignored. + ## + # existingSecret: name-of-existing-secret + ## Force users to specify required passwords + ## + forcePassword: true + ## Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: true + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} + +## Existing ConfigMap with custom init scripts +## +# initdbScriptsConfigMap: + +## MySQL Primary parameters +## +primary: + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MySQL Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + max_connections=1000 + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## Name of existing ConfigMap with MySQL Primary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for MySQL Primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for MySQL Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## MySQL Primary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## MySQL Primary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## MySQL Primary pod anti-affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## MySQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: soft + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: custos.mysql.role + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: + - primary + + ## Affinity for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MySQL primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MySQL primary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # memory: 256Mi + # cpu: 250m + requests: {} + # memory: 256Mi + # cpu: 250m + + ## MySQL primary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MySQL primary custom liveness probe + ## + customLivenessProbe: {} + + ## MySQL primary custom rediness probe + ## + customReadinessProbe: {} + + ## MySQL primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MySQL primary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MySQL primary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MySQL primary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath: /bitnami/mysql + ## Name of existing PVC to hold MySQL Primary data + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + # existingClaim: + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 2Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MySQL Primary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MySQL Primary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MySQL Primary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MySQL Primary pod(s) + ## + sidecars: [] + + ## MySQL Primary Service paramaters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MySQL primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## MySQL Secondary parameters +## +secondary: + ## Number of MySQL Secondary replicas to deploy + ## + replicaCount: 1 + + ## Command and args for running the container (set to default if not set). Use array form + ## + command: [] + args: [] + + ## Configure MySQL Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + max_connections=1000 + slave-skip-errors=1062 + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## Name of existing ConfigMap with MySQL Secondary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + # existingConfiguration: + + ## updateStrategy for MySQL Secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + + ## Partition update strategy for MySQL Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + # rollingUpdatePartition: + + ## MySQL Secondary pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## MySQL Secondary pod affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAffinityPreset: "" + + ## MySQL Secondary pod anti-affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + + ## MySQL Secondary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + type: soft + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: custos.mysql.role + ## Node label values to match + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: + - secondary + + ## Affinity for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + + ## Node labels for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + + ## MySQL secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + + ## MySQL secondary container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # memory: 256Mi + # cpu: 250m + requests: {} + # memory: 256Mi + # cpu: 250m + + ## MySQL secondary container's liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## MySQL secondary custom liveness probe + ## + customLivenessProbe: {} + + ## MySQL secondary custom rediness probe + ## + customReadinessProbe: {} + + ## MySQL secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + + ## An array to add extra environment variables on MySQL secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars for MySQL secondary containers: + ## + extraEnvVarsCM: "" + + ## Secret with extra env vars for MySQL secondary containers: + ## + extraEnvVarsSecret: "" + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath : /bitnami/mysql + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Persistent Volume Claim annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: 2Gi + ## selector can be used to match an existing PersistentVolume + ## selector: + ## matchLabels: + ## app: my-app + selector: {} + + ## Extra volumes to add to the MySQL Secondary pod(s) + ## + extraVolumes: [] + + ## Extra volume mounts to add to the MySQL Secondary container(s) + ## + extraVolumeMounts: [] + + ## Extra init containers to add to the MySQL Secondary pod(s) + ## + initContainers: [] + + ## Extra sidecar containers to add to the MySQL Secondary pod(s) + ## + sidecars: [] + + ## MySQL Secondary Service paramaters + ## + service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 3306 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Service clusterIP + ## + # clusterIP: None + clusterIP: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required + ## + annotations: {} + + ## MySQL secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + create: false + ## Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + # maxUnavailable: 1 + +## MySQL pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the mysql.fullname template + ## + # name: + ## Annotations to add to the service account (evaluated as a template) + ## + annotations: {} + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## + create: false + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: stretch + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## Mysqld Prometheus exporter paramters +## +metrics: + enabled: true + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r264 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## MySQL Prometheus exporter service parameters
+  ##
+  service:
+    type: ClusterIP
+    port: 9104
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+
+  ## Extra args to be passed to mysqld_exporter
+  ## ref: https://github.com/prometheus/mysqld_exporter/
+  ## E.g.
+  ## - --collect.auto_increment.columns
+  ## - --collect.binlog_size
+  ## - --collect.engine_innodb_status
+  ## - --collect.engine_tokudb_status
+  ## - --collect.global_status
+  ## - --collect.global_variables
+  ## - --collect.info_schema.clientstats
+  ## - --collect.info_schema.innodb_metrics
+  ## - --collect.info_schema.innodb_tablespaces
+  ## - --collect.info_schema.innodb_cmp
+  ## - --collect.info_schema.innodb_cmpmem
+  ## - --collect.info_schema.processlist
+  ## - --collect.info_schema.processlist.min_time
+  ## - --collect.info_schema.query_response_time
+  ## - --collect.info_schema.tables
+  ## - --collect.info_schema.tables.databases
+  ## - --collect.info_schema.tablestats
+  ## - --collect.info_schema.userstats
+  ## - --collect.perf_schema.eventsstatements
+  ## - --collect.perf_schema.eventsstatements.digest_text_limit
+  ## - --collect.perf_schema.eventsstatements.limit
+  ## - --collect.perf_schema.eventsstatements.timelimit
+  ## - --collect.perf_schema.eventswaits
+  ## - --collect.perf_schema.file_events
+  ## - --collect.perf_schema.file_instances
+  ## - --collect.perf_schema.indexiowaits
+  ## - --collect.perf_schema.tableiowaits
+  ## - --collect.perf_schema.tablelocks
+  ## - --collect.perf_schema.replication_group_member_stats
+  ## - --collect.slave_status
+  ## - --collect.slave_hosts
+  ## - --collect.heartbeat
+  ## - --collect.heartbeat.database
+  ## - --collect.heartbeat.table
+  ##
+  extraArgs:
+    primary: []
+    secondary: []
+
+  ## Mysqld Prometheus exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   memory: 256Mi
+    #   cpu: 100m
+    requests: {}
+    #   memory: 256Mi
+    #   cpu: 100m
+
+  ## Mysqld Prometheus exporter liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 120
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+
+  ## Prometheus Service Monitor
+  ## ref: https://github.com/coreos/prometheus-operator
+  ##
+  serviceMonitor:
+    ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
+    ##
+    enabled: false
+    ## Specify the namespace in which the serviceMonitor resource will be created
+    ##
+    # namespace: ""
+    ## Specify the interval at which metrics should be scraped
+    ##
+    interval: 30s
+    ## Specify the timeout after which the scrape is ended
+    ##
+    # scrapeTimeout: 30s
+    ## Specify Metric Relabellings to add to the scrape endpoint
+    ##
+    # relabellings:
+    ## Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## Specify the release for ServiceMonitor. Sometimes it should be custom for prometheus operator to work
+    ##
+    # release: ""
+    ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    ##
+    additionalLabels: {}
diff --git a/deployment_scripts/custos/vault/README.md b/deployment_scripts/custos/vault/README.md
new file mode 100644
index 0000000..ea74894
--- /dev/null
+++ b/deployment_scripts/custos/vault/README.md
@@ -0,0 +1,10 @@
+### Deploy Vault
+
+- Add the HashiCorp Helm repository, create the namespace, and install the chart
+```shell
+helm repo add hashicorp https://helm.releases.hashicorp.com
+kubectl create namespace vault
+helm install vault hashicorp/vault --namespace vault -f values.yaml --version 0.10.0
+```
+- Update the hostname in ingress.yaml to the master node hostname, then apply it
+
+  ``kubectl apply -f ingress.yaml -n vault``
+
+#### Generate root token and keys to initialize the Vault instance
+- e.g. ``kubectl exec -n vault vault-0 -- vault operator init`` prints the unseal keys and the initial root token
+- ``kubectl exec -n vault vault-0 -- vault operator unseal <unseal-key>`` (repeat until the unseal threshold is reached)
diff --git a/deployment_scripts/custos/vault/ingress.yaml b/deployment_scripts/custos/vault/ingress.yaml
new file mode 100644
index 0000000..8258728
--- /dev/null
+++ b/deployment_scripts/custos/vault/ingress.yaml
@@ -0,0 +1,22 @@
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: vault-ingress
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/cluster-issuer: letsencrypt-prod
+spec:
+  tls:
+    - hosts:
+        - js-169-150.jetstream-cloud.org
+      secretName: tls-vault-secret
+
+  rules:
+    - host: js-169-150.jetstream-cloud.org
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: vault
+              servicePort: 8200
diff --git a/deployment_scripts/custos/vault/values.yaml b/deployment_scripts/custos/vault/values.yaml
new file mode 100644
index 0000000..3439a13
--- /dev/null
+++ b/deployment_scripts/custos/vault/values.yaml
@@ -0,0 +1,731 @@
+# Available parameters and their default values for the Vault chart.
+
+global:
+  # enabled is the master enabled switch. Setting this to true or false
+  # will enable or disable all the components within this chart by default.
+  enabled: true
+  # Image pull secret to use for registry authentication.
+ imagePullSecrets: [] + # imagePullSecrets: + # - name: image-pull-secret + # TLS for end-to-end encrypted transport + tlsDisable: true + # If deploying to OpenShift + openshift: false + # Create PodSecurityPolicy for pods + psp: + enable: false + # Annotation for PodSecurityPolicy. + # This is a multi-line templated string map, and can also be set as YAML. + annotations: | + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + +injector: + # True if you want to enable vault agent injection. + enabled: true + + replicas: 1 + + # If multiple replicas are specified, by default a leader-elector side-car + # will be created so that only one injector attempts to create TLS certificates. + leaderElector: + enabled: true + image: + repository: "gcr.io/google_containers/leader-elector" + tag: "0.4" + ttl: 60s + + # If true, will enable a node exporter metrics endpoint at /metrics. + metrics: + enabled: false + + # External vault server address for the injector to use. Setting this will + # disable deployment of a vault server along with the injector. + externalVaultAddr: "" + + # image sets the repo and tag of the vault-k8s image to use for the injector. + image: + repository: "hashicorp/vault-k8s" + tag: "0.9.0" + pullPolicy: IfNotPresent + + # agentImage sets the repo and tag of the Vault image to use for the Vault Agent + # containers. This should be set to the official Vault image. Vault 1.3.1+ is + # required. + agentImage: + repository: "vault" + tag: "1.7.0" + + # Mount Path of the Vault Kubernetes Auth Method. + authPath: "auth/kubernetes" + + # Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info + logLevel: "info" + + # Configures the log format of the injector. Supported log formats: "standard", "json". + logFormat: "standard" + + # Configures all Vault Agent sidecars to revoke their token when shutting down + revokeOnShutdown: false + + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: {} + + # Configures failurePolicy of the webhook. The "unspecified" default behaviour deoends on the + # API Version of the WebHook. + # To block pod creation while webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + certs: + # secretName is the name of the secret that has the TLS certificate and + # private key to serve the injector webhook. 
If this is null, then the + # injector will default to its automatic management mode that will assign + # a service account to the injector to generate its own certificates. + secretName: null + + # caBundle is a base64-encoded PEM-encoded certificate bundle for the + # CA that signed the TLS certificate that the webhook serves. This must + # be set if secretName is non-null. + caBundle: "" + + # certName and keyName are the names of the files within the secret for + # the TLS cert and private key, respectively. These have reasonable + # defaults but can be customized if necessary. + certName: tls.crt + keyName: tls.key + + # resources: {} + resources: + requests: + memory: 256Mi + # cpu: 250m + # limits: + # cpu: 250m + + # extraEnvironmentVars is a list of extra environment variables to set in the + # injector deployment. + extraEnvironmentVars: {} + # KUBERNETES_SERVICE_HOST: kubernetes.default.svc + + # Affinity Settings for injector pods + # This should be a multi-line string matching the affinity section of a + # PodSpec. + # Commenting out or setting as empty the affinity variable, will allow + # deployment of multiple replicas to single node services such as Minikube. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: webhook + topologyKey: kubernetes.io/hostname + + # Toleration Settings for injector pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: null + + # nodeSelector labels for injector pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # Priority class for injector pods + priorityClassName: "" + + # Extra annotations to attach to the injector pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the injector pods + annotations: {} + + # Extra labels to attach to the agent-injector + # This should be a YAML map of the labels to apply to the injector + extraLabels: {} + + # Injector service specific config + service: + # Extra annotations to attach to the injector service + annotations: {} + +server: + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec. + # By default no direct resource request is made. + + image: + repository: "vault" + tag: "1.7.0" + # Overrides the default Image Pull Policy + pullPolicy: IfNotPresent + + # Configure the Update Strategy Type for the StatefulSet + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategyType: "OnDelete" + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Ingress allows ingress services to be created to allow external access + # from Kubernetes to access Vault pods. + # If deployment is on OpenShift, the following block is ignored. 
+ # In order to expose the service, use the route section below + ingress: + enabled: false + labels: {} + # traffic: external + annotations: {} + # | + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # or + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # OpenShift only - create a route to expose the service + # The created route will be of type passthrough + route: + enabled: false + labels: {} + annotations: {} + host: chart-example.local + + # authDelegator enables a cluster role binding to be attached to the service + # account. This cluster role binding can be used to setup Kubernetes auth + # method. https://www.vaultproject.io/docs/auth/kubernetes.html + authDelegator: + enabled: true + + # extraInitContainers is a list of init containers. Specified as a YAML list. + # This is useful if you need to run a script to provision TLS certificates or + # write out configuration files in a dynamic way. + extraInitContainers: null + # # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder, + # # which is defined in the volumes value. + # - name: oauthapp + # image: "alpine" + # command: [sh, -c] + # args: + # - cd /tmp && + # wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz && + # tar -xf oauthapp.xz && + # mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp && + # chmod +x /usr/local/libexec/vault/oauthapp + # volumeMounts: + # - name: plugins + # mountPath: /usr/local/libexec/vault + + # extraContainers is a list of sidecar containers. Specified as a YAML list. + extraContainers: null + + # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers + # This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation + shareProcessNamespace: false + + # extraArgs is a string containing additional Vault server arguments. + extraArgs: "" + + # Used to define custom readinessProbe settings + readinessProbe: + enabled: true + # If you need to use a http path instead of the default exec + # path: /v1/sys/health?standbyok=true + + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + # Used to enable a livenessProbe for the pods + livenessProbe: + enabled: false + path: "/v1/sys/health?standbyok=true" + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 60 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. 
+ timeoutSeconds: 3 + + # Used to set the sleep time during the preStop step + preStopSleepSeconds: 5 + + # Used to define commands to run after the pod is ready. + # This can be used to automate processes such as initialization + # or boostrapping auth methods. + postStart: [] + # - /bin/sh + # - -c + # - /vault/userconfig/myscript/run.sh + + # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be + # used to include variables required for auto-unseal. + extraEnvironmentVars: {} + # GOOGLE_REGION: global + # GOOGLE_PROJECT: myproject + # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json + + # extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set. + # These variables take value from existing Secret objects. + extraSecretEnvironmentVars: [] + # - envName: AWS_SECRET_ACCESS_KEY + # secretName: vault + # secretKey: AWS_SECRET_ACCESS_KEY + + # Deprecated: please use 'volumes' instead. + # extraVolumes is a list of extra volumes to mount. These will be exposed + # to Vault in the path `/vault/userconfig//`. The value below is + # an array of objects, examples are shown below. + extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # path: null # default is `/vault/userconfig` + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: plugins + # emptyDir: {} + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumeMounts: null + # - mountPath: /usr/local/libexec/vault + # name: plugins + # readOnly: true + + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: null + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + #type: ClusterIP + + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #nodePort: 30000 + + # Port on which Vault server is listening + port: 8200 + # Target port to which the service should be mapped to + targetPort: 8200 + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the service. + annotations: {} + + # This configures the Vault Statefulset to create a PVC for data + # storage when using the file or raft backend storage engines. + # See https://www.vaultproject.io/docs/configuration/storage/index.html to know more + dataStorage: + enabled: true + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/data" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. + storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + + # This configures the Vault Statefulset to create a PVC for audit + # logs. Once Vault is deployed, initialized and unseal, Vault must + # be configured to use this for audit logs. This will be mounted to + # /vault/audit + # See https://www.vaultproject.io/docs/audit/index.html to know more + auditStorage: + enabled: false + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/audit" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. + storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + + # Run Vault in "dev" mode. This requires no further setup, no state management, + # and no initialization. This is useful for experimenting with Vault without + # needing to unseal, store keys, et. al. All data is lost on restart - do not + # use dev mode for anything other than experimenting. 
+ # See https://www.vaultproject.io/docs/concepts/dev-server.html to know more + dev: + enabled: false + + # Set VAULT_DEV_ROOT_TOKEN_ID value + devRootToken: "root" + + # Run Vault in "standalone" mode. This is the default mode that will deploy if + # no arguments are given to helm. This requires a PVC for data storage to use + # the "file" backend. This mode is not highly available and should not be scaled + # past a single replica. + standalone: + enabled: "-" + + # config is a raw string of default configuration when using a Stateful + # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data + # and store data there. This is only used when using a Replica count of 1, and + # using a stateful set. This should be HCL. + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "file" { + path = "/vault/data" + } + + # Example configuration for using auto-unseal, using Google Cloud KMS. The + # GKMS keys must already exist, and the cluster must have a service account + # that is authorized to access GCP KMS. + #seal "gcpckms" { + # project = "vault-helm-dev" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # Run Vault in "HA" mode. There are no storage requirements unless audit log + # persistence is required. In HA mode Vault will configure itself to use Consul + # for its storage backend. The default configuration provided will work the Consul + # Helm project by default. It is possible to manually configure Vault to use a + # different HA backend. + ha: + enabled: true + replicas: 1 + + # Set the api_addr configuration for Vault HA + # See https://www.vaultproject.io/docs/configuration#api_addr + # If set to null, this will be set to the Pod IP Address + apiAddr: null + + # Enables Vault's integrated Raft storage. Unlike the typical HA modes where + # Vault's persistence is external (such as Consul), enabling Raft mode will create + # persistent volumes for Vault to store data according to the configuration under server.dataStorage. + # The Vault cluster will coordinate leader elections and failovers internally. + raft: + + # Enables Raft integrated storage + enabled: false + # Set the Node Raft ID to the name of the pod + setNodeId: false + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + + storage "raft" { + path = "/vault/data" + } + + service_registration "kubernetes" {} + + # config is a raw string of default configuration when using a Stateful + # deployment. Default is to use a Consul for its HA storage backend. + # This should be HCL. + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. 
For more information see: + # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "consul" { + path = "vault" + address = "HOST_IP:8500" + } + + service_registration "kubernetes" {} + + # Example configuration for using auto-unseal, using Google Cloud KMS. The + # GKMS keys must already exist, and the cluster must have a service account + # that is authorized to access GCP KMS. + #seal "gcpckms" { + # project = "vault-helm-dev-246514" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + disruptionBudget: + enabled: true + + # maxUnavailable will default to (n/2)-1 where n is the number of + # replicas. If you'd like a custom value, you can specify an override here. + maxUnavailable: null + + # Definition of the serviceAccount used to run Vault. + # These options are also used when using an external Vault server to validate + # Kubernetes tokens. + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + + # Settings for the statefulSet used to run Vault. + statefulSet: + # Extra annotations for the statefulSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the statefulSet. + annotations: {} + +# Vault UI +ui: + # True if you want to create a Service entry for the Vault UI. + # + # serviceType can be used to control the type of service created. For + # example, setting this to "LoadBalancer" will create an external load + # balancer (for supported K8S installations) to access the UI. + enabled: true + publishNotReadyAddresses: true + # The service should only contain selectors for active Vault pod + activeVaultPodOnly: false + serviceType: "ClusterIP" + serviceNodePort: null + externalPort: 8200 + + # loadBalancerSourceRanges: + # - 10.0.0.0/16 + # - 1.78.23.3/32 + + # loadBalancerIP: + + # Extra annotations to attach to the ui service + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the ui service + annotations: {} + +# secrets-store-csi-driver-provider-vault +csi: + # True if you want to install a secrets-store-csi-driver-provider-vault daemonset. + # + # Requires installing the secrets-store-csi-driver separately, see: + # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver + # + # With the driver and provider installed, you can mount Vault secrets into volumes + # similar to the Vault Agent injector, and you can also sync those secrets into + # Kubernetes secrets. + enabled: false + + image: + repository: "hashicorp/vault-csi-provider" + tag: "0.1.0" + pullPolicy: IfNotPresent + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. 
+ # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: tls + # secret: + # secretName: vault-tls + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumeMounts: null + # - name: tls + # mountPath: "/vault/tls" + # readOnly: true + + resources: {} + # resources: + # requests: + # cpu: 50m + # memory: 128Mi + # limits: + # cpu: 50m + # memory: 128Mi + + # Settings for the daemonSet used to run the provider. + daemonSet: + updateStrategy: + type: RollingUpdate + maxUnavailable: "" + # Extra annotations for the daemonSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the daemonSet. + annotations: {} + pod: + # Extra annotations for the provider pods. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the pod. + annotations: {} + + serviceAccount: + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + + # Used to configure readinessProbe for the pods. + readinessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + # Used to configure livenessProbe for the pods. + livenessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + + # Enables debug logging. + debug: false diff --git a/deployment_scripts/metric_server.yaml b/deployment_scripts/metric_server.yaml deleted file mode 100644 index e69de29..0000000