From 1ea46609721e96422086f611508270bd95303f95 Mon Sep 17 00:00:00 2001 From: G33tha Date: Fri, 23 Sep 2022 16:53:09 +0530 Subject: [PATCH 001/203] removed copy-to-helm-public tasks from mount-keys role (#3549) Co-authored-by: G33tha --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- .../ansible/roles/helm-deploy/defaults/main.yml | 2 +- .../ansible/roles/helm-deploy/tasks/main.yml | 2 +- .../roles/mount-keys/tasks/copy-to-helm-public.yml | 14 -------------- 4 files changed, 3 insertions(+), 17 deletions(-) delete mode 100644 kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 12d98086b3..3b5946e333 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -982,7 +982,7 @@ adminutil_access_values: role_to_run: - decrypt.yml - generate-keys.yml - - copy-to-helm-public.yml + - copy-to-helm.yml # analytics-service related vars cassandra: diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index 0dcb0e8d3a..ed2c7f5aca 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -238,7 +238,7 @@ adminutil_access_values: role_to_run: - decrypt.yml - generate-keys.yml - - copy-to-helm-public.yml + - copy-to-helm.yml opa_policies_path: ../../../opa # Cannot remove common.rego from common_opa_policy_files yet diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index a0dba8e709..bd40bcfb82 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -31,7 +31,7 @@ private_key_prefix: "{{ outer_item.0.values_to_pass.keyprefix }}" private_key_sign_start: "{{ outer_item.0.values_to_pass.keystart }}" private_key_sign_end: 
"{{ outer_item.0.values_to_pass.keycount if outer_item.0.values_to_pass.keycount > '0' else '1' }}" - when: release_name == "adminutils" or release_name == "gotenberg" + when: release_name == "adminutils" with_subelements: - "{{adminutil_keys_values}}" - role_to_run diff --git a/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml b/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml deleted file mode 100644 index 4320bccfd3..0000000000 --- a/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create the keys directory in case user is overriding the private_key_path - file: - path: "{{chart_path}}/keys" - state: directory - mode: 0755 - -- name: Copy keys directory to chart path - vars: - private_key_path: "{{private_key_path | regex_replace('^\\/|\\/$', '')}}" - copy: "src={{inventory_dir}}{{private_key_path}}{{private_key_prefix}}{{item}} dest={{chart_path}}/keys/" - with_sequence: start={{private_key_sign_start}} end={{private_key_sign_start|int + private_key_sign_end|int - 1}} stride={{private_key_sign_incr}} - when: (private_key_sign_start|int + private_key_sign_end|int - 1) > 0 - From 730798ca832c07ec937221b3d6fbbcdb502d65e0 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 8 Sep 2022 17:33:41 +0530 Subject: [PATCH 002/203] fix: install specific azure-cli version --- ansible/bootstrap.yml | 1 - ansible/roles/azure-cli/tasks/main.yml | 28 ++++++++++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 429d8b7127..aba26fbbd4 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -22,7 +22,6 @@ - all - hosts: "{{hosts}}" - gather_facts: no become: yes ignore_unreachable: yes vars_files: diff --git a/ansible/roles/azure-cli/tasks/main.yml b/ansible/roles/azure-cli/tasks/main.yml index 484bf1f4fb..0a1148f2c4 100644 --- a/ansible/roles/azure-cli/tasks/main.yml +++ 
b/ansible/roles/azure-cli/tasks/main.yml @@ -1,9 +1,25 @@ -- name: install azure cli - become: yes - shell: - which az || curl -sL https://aka.ms/InstallAzureCLIDeb | bash -- name: install azcopy - become: yes +--- +- name: Add Microsfot signing key + ansible.builtin.apt_key: + url: https://packages.microsoft.com/keys/microsoft.asc + state: present + +- name: Add Microsfot repository into sources list + ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ {{ ansible_distribution_release | lower }} main" + state: present + +- name: Install azue cli and dependent packages + ansible.builtin.apt: + pkg: + - ca-certificates + - curl + - apt-transport-https + - lsb-release + - gnupg + - "azure-cli=2.33.1-1~{{ ansible_distribution_release | lower }}" + +- name: Install azcopy shell: | which azcopy || ( \ mkdir /tmp/azcopy && cd /tmp/azcopy && \ From 4773ffc261b91899047190011228404f336a72e2 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 9 Sep 2022 23:58:59 +0530 Subject: [PATCH 003/203] feat: updated plays to use azure role taks based on cloud provider Signed-off-by: Keshav Prasad --- ansible/artifacts-download.yml | 17 +++- ansible/artifacts-upload.yml | 18 +++- ansible/assets-upload.yml | 28 +++++- ansible/cert-file-upload.yml | 10 -- ansible/deploy-plugins.yml | 99 +++++++++++-------- ansible/desktop-faq-upload.yml | 93 ++++++++--------- ansible/dial_upload-schema.yml | 21 ++-- ansible/grafana-backup.yml | 3 - ansible/jenkins-backup.yml | 4 - ansible/kp_upload-schema.yml | 20 ++-- ansible/mongodb-backup.yml | 3 - ansible/nodebbui-upload.yml | 34 ++++--- ansible/plugin.yml | 22 ----- ansible/plugins.yml | 28 ++++++ ansible/prometheus-backup.yml | 8 -- ansible/prometheus-restore.yml | 9 -- ansible/redis-backup.yml | 3 - .../artifacts-download-azure/tasks/main.yml | 8 -- .../artifacts-upload-azure/tasks/main.yml | 8 -- .../assets-upload-azure/defaults/main.yml | 5 - .../roles/assets-upload-azure/tasks/main.yml | 
25 ----- ansible/roles/azure-cli/tasks/main.yml | 6 +- .../azure-cloud-storage/defaults/main.yml | 67 +++++++++++++ .../tasks/blob-delete-batch.yml | 5 + .../tasks/blob-download.yml | 5 + .../tasks/blob-upload-batch.yml | 10 ++ .../azure-cloud-storage/tasks/blob-upload.yml | 10 ++ .../tasks/container-create.yml | 8 ++ .../tasks/delete-using-azcopy.yml | 7 ++ .../roles/azure-cloud-storage/tasks/main.yml | 21 ++++ .../tasks/upload-using-azcopy.yml | 12 +++ .../blob-batch-delete-azure/tasks/main.yml | 8 -- ansible/roles/cassandra-backup/meta/main.yml | 2 - ansible/roles/cassandra-backup/tasks/main.yml | 34 +++---- ansible/roles/cassandra-restore/meta/main.yml | 2 - .../roles/cassandra-restore/tasks/main.yml | 45 ++++----- .../roles/cert-file-upload/defaults/main.yml | 2 - ansible/roles/cert-file-upload/tasks/main.yml | 7 -- ansible/roles/cert-templates/tasks/main.yml | 27 +++-- ansible/roles/desktop-deploy/tasks/main.yml | 48 ++++----- .../roles/es-azure-snapshot/defaults/main.yml | 2 +- .../roles/es-azure-snapshot/tasks/main.yml | 14 ++- ansible/roles/grafana-backup/meta/main.yml | 2 - ansible/roles/grafana-backup/tasks/main.yml | 20 ++-- .../jenkins-backup-upload/tasks/main.yml | 20 ++-- ansible/roles/mongodb-backup/meta/main.yml | 2 - ansible/roles/mongodb-backup/tasks/main.yml | 16 ++- .../roles/offline-installer/tasks/main.yml | 2 +- ...ploadToAzure.yml => upload_to_storage.yml} | 51 +++++----- .../meta/main.yml | 2 - .../tasks/main.yml | 27 +++-- .../tasks/main.yml | 17 ++-- ansible/roles/postgresql-backup/meta/main.yml | 2 - .../roles/postgresql-backup/tasks/main.yml | 28 +++--- .../roles/postgresql-restore/meta/main.yml | 2 - .../roles/postgresql-restore/tasks/main.yml | 18 ++-- .../roles/prometheus-backup-v2/tasks/main.yml | 15 ++- ansible/roles/prometheus-backup/meta/main.yml | 2 - .../roles/prometheus-backup/tasks/main.yml | 26 +++-- .../roles/prometheus-restore/tasks/main.yml | 22 +++-- ansible/roles/redis-backup/meta/main.yml | 2 - 
ansible/roles/redis-backup/tasks/main.yml | 20 ++-- ansible/roles/upload-batch/tasks/main.yml | 8 -- ansible/uploadFAQs.yml | 61 +++++------- pipelines/certs-templates/Jenkinsfile.upload | 61 ------------ .../org_sunbird_questionunit_quml/Jenkinsfile | 4 +- .../ansible/inventory/dev/Core/common.yml | 15 ++- .../ansible/inventory/dev/Core/secrets.yml | 14 ++- 68 files changed, 662 insertions(+), 605 deletions(-) delete mode 100644 ansible/cert-file-upload.yml delete mode 100644 ansible/plugin.yml create mode 100644 ansible/plugins.yml delete mode 100644 ansible/roles/artifacts-download-azure/tasks/main.yml delete mode 100644 ansible/roles/artifacts-upload-azure/tasks/main.yml delete mode 100644 ansible/roles/assets-upload-azure/defaults/main.yml delete mode 100755 ansible/roles/assets-upload-azure/tasks/main.yml create mode 100644 ansible/roles/azure-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-download.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/container-create.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml delete mode 100755 ansible/roles/blob-batch-delete-azure/tasks/main.yml delete mode 100644 ansible/roles/cassandra-backup/meta/main.yml delete mode 100644 ansible/roles/cassandra-restore/meta/main.yml delete mode 100644 ansible/roles/cert-file-upload/defaults/main.yml delete mode 100644 ansible/roles/cert-file-upload/tasks/main.yml delete mode 100644 ansible/roles/grafana-backup/meta/main.yml delete mode 100644 ansible/roles/mongodb-backup/meta/main.yml rename 
ansible/roles/offline-installer/tasks/{uploadToAzure.yml => upload_to_storage.yml} (67%) delete mode 100644 ansible/roles/postgres-azure-managed-service-backup/meta/main.yml delete mode 100644 ansible/roles/postgresql-backup/meta/main.yml delete mode 100644 ansible/roles/postgresql-restore/meta/main.yml delete mode 100644 ansible/roles/prometheus-backup/meta/main.yml delete mode 100644 ansible/roles/redis-backup/meta/main.yml delete mode 100644 ansible/roles/upload-batch/tasks/main.yml delete mode 100644 pipelines/certs-templates/Jenkinsfile.upload diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index feb78219b8..2872fa1013 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -3,8 +3,15 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_artifact_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_artifact_storage_account_sas }}" - roles: - - artifacts-download-azure + tasks: + - name: download artifact from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ artifacts_container }}" + blob_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + storage_account_name: "{{ azure_artifact_storage_account_name }}" + storage_account_key: "{{ azure_artifact_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 772ec2cca4..642a9aa111 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -3,8 +3,16 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_artifact_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_artifact_storage_account_sas }}" - roles: - - artifacts-upload-azure + tasks: + - name: upload artifact to azure storage + 
include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ artifacts_container }}" + container_public_access: "off" + blob_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + storage_account_name: "{{ azure_artifact_storage_account_name }}" + storage_account_key: "{{ azure_artifact_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index b356cf9362..8bc0ac9123 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -1,8 +1,28 @@ --- - hosts: localhost - vars: - ansible_connection: local vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] - roles: - - assets-upload-azure + tasks: + - name: set common azure variables + set_fact: + blob_container_name: "{{ player_cdn_container }}" + container_public_access: "container" + blob_container_folder_path: "" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + when: cloud_service_provider == "azure" + + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + when: cloud_service_provider == "azure" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + local_file_or_folder_path: "{{ assets }}" + when: cloud_service_provider == "azure" diff --git a/ansible/cert-file-upload.yml b/ansible/cert-file-upload.yml deleted file mode 100644 index e29a7b6c2b..0000000000 --- a/ansible/cert-file-upload.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ 
sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - cert-file-upload diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index cd2b5b512b..7c4958a5f5 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -3,9 +3,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | @@ -15,40 +12,62 @@ tags: - preview - - name: delete batch - shell: | - azcopy rm "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ plugin_container_name }}/{{ folder_name }}{{sunbird_public_storage_account_sas}}" --recursive=true - async: 3600 - poll: 10 - tags: - - content-editor - - collection-editor - - generic-editor - - preview - - - name: upload batch - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/{{ folder_name }} --source {{ source_name }}" - async: 3600 - poll: 10 - tags: - - content-editor - - collection-editor - - generic-editor - - preview - - editor - - core-plugins - - - - name: upload file - command: "az storage blob upload --container-name {{ plugin_container_name }} --file {{ source_file_name }} --name artefacts/content-player/content-player-{{ player_version_number }}.zip" - async: 3600 - poll: 10 - tags: - - preview - - - name: run az_copy.sh - shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" - async: 3600 - poll: 10 - tags: - - plugins + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_container_folder_path: "/{{ folder_name }}" + storage_account_name: "{{ azure_public_storage_account_name 
}}" + storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + + - block: + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + + - block: + - name: run the az_copy.sh script + shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" + async: 3600 + poll: 10 + environment: + AZURE_STORAGE_ACCOUNT: "{{ azure_public_storage_account_name }}" + AZURE_STORAGE_SAS_TOKEN: "{{ azure_public_storage_account_sas }}" + tags: + - plugins + when: cloud_service_provider == "azure" diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 9dbeddd1c0..7c7e992039 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,49 +1,50 @@ - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - artifacts_container: "{{desktop_container}}" - artifact: "{{destination_path}}" - artifact_path: "{{playbook_dir}}/../{{src_file_path}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - artifacts-upload-azure - tags: - - 
upload-desktop-faq - - -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - vars: - artifacts_container: "{{desktop_container}}" - artifact: "{{destination_path}}" - artifact_path: "{{playbook_dir}}/../{{src_file_path}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_private_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_private_storage_account_key }}" - roles: - - artifacts-upload-azure - tags: - - upload-label - -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - vars: - source_path: "{{playbook_dir}}/../{{src_file_path}}" - destination_path: "{{destination_path}}" - container_name: "{{desktop_container}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - upload-batch - tags: - - upload-chatbot-config - - upload-batch + tasks: + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ desktop_container }}" + blob_file_name: "{{ destination_path }}" + blob_container_folder_path: "/{{ destination_path }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + container_public_access: "container" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + container_public_access: "off" + storage_account_name: "{{ azure_private_storage_account_name }}" + storage_account_key: "{{ azure_private_storage_account_key }}" + tags: + - upload-label + + - block: + - name: upload batch of files to azure storage + 
include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 1f510c2c92..54b0672ed9 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -1,11 +1,7 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - name: Create directories file: @@ -22,9 +18,16 @@ dest: dial_schema_template_files/{{ item.path }} with_filetree: "{{ source_name }}" when: item.state == 'file' - - - name: upload batch - command: "az storage blob upload-batch --destination {{ dial_plugin_container_name }}/schemas/local --source dial_schema_template_files" - async: 3600 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ dial_plugin_container_name }}" + container_public_access: "blob" + blob_container_folder_path: "/schemas/local" + local_file_or_folder_path: "dial_schema_template_files" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/grafana-backup.yml b/ansible/grafana-backup.yml index 9770875812..e5ee720b59 100644 --- a/ansible/grafana-backup.yml +++ b/ansible/grafana-backup.yml @@ -2,8 +2,5 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - 
AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_name }}" roles: - grafana-backup diff --git a/ansible/jenkins-backup.yml b/ansible/jenkins-backup.yml index 4506a6bf62..acc4172c73 100644 --- a/ansible/jenkins-backup.yml +++ b/ansible/jenkins-backup.yml @@ -3,9 +3,5 @@ hosts: jenkins-master vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - roles: - jenkins-backup-upload diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 0ca52f5f02..843abfbd19 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -1,13 +1,17 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - - name: upload batch - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/schemas/local --source {{ source_name }}" - async: 3600 - poll: 10 \ No newline at end of file + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_container_folder_path: "/schemas/local" + local_file_or_folder_path: "{{ source_name }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/mongodb-backup.yml b/ansible/mongodb-backup.yml index 95ff1e7d61..2ab4091fc4 100644 --- a/ansible/mongodb-backup.yml +++ b/ansible/mongodb-backup.yml @@ -2,8 +2,5 @@ become: yes vars_files: - 
['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - mongodb-backup diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 92b484a580..48f59dd327 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -3,17 +3,27 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - - name: delete batch - shell: | - azcopy rm "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ nodebbui_container_name }}{{sunbird_public_storage_account_sas}}" --recursive=true - async: 3600 - poll: 10 + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + vars: + blob_container_name: "{{ nodebbui_container_name }}" + blob_container_folder_path: "" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + when: cloud_service_provider == "azure" - - name: upload batch - command: "az storage blob upload-batch --destination {{ nodebbui_container_name }} --source {{ source_name }}" - async: 3600 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ nodebbui_container_name }}" + container_public_access: "container" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ source_name }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/plugin.yml 
b/ansible/plugin.yml deleted file mode 100644 index ac8f93b5cf..0000000000 --- a/ansible/plugin.yml +++ /dev/null @@ -1,22 +0,0 @@ -- hosts: local - become: yes - gather_facts: no - vars_files: - - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - tasks: - - name: delte plugin org_sunbird_questionunit_quml - command: "az storage blob delete-batch --source {{ plugin_container_name }} --pattern content-plugins/{{ plugins_name }}" - async: 3600 - poll: 10 - tags: - - org_sunbird_questionunit_quml - - - name: upload plugin org_sunbird_questionunit_quml - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/content-plugins/{{ plugins_name }} --source {{ source_file }}" - async: 3600 - poll: 10 - tags: - - org_sunbird_questionunit_quml diff --git a/ansible/plugins.yml b/ansible/plugins.yml new file mode 100644 index 0000000000..0245f1801a --- /dev/null +++ b/ansible/plugins.yml @@ -0,0 +1,28 @@ +--- +- hosts: local + gather_facts: false + vars_files: + - "{{inventory_dir}}/secrets.yml" + tasks: + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_delete_pattern: "content-plugins/{{ plugins_name }}" + blob_container_folder_path: "/content-plugins/{{ plugins_name }}" + local_file_or_folder_path: "{{ source_file }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + + - name: delete batch of files from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-delete-batch.yml + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + when: cloud_service_provider == "azure" diff 
--git a/ansible/prometheus-backup.yml b/ansible/prometheus-backup.yml index d31adbd125..65a87b3061 100644 --- a/ansible/prometheus-backup.yml +++ b/ansible/prometheus-backup.yml @@ -6,9 +6,6 @@ prometheus_url: "http://localhost:9090/prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: @@ -23,8 +20,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: @@ -39,9 +34,6 @@ prometheus_url: "http://localhost:19090/prometheus" vars_files: - ['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: diff --git a/ansible/prometheus-restore.yml b/ansible/prometheus-restore.yml index 5d4bba9731..1ddf6971d5 100644 --- a/ansible/prometheus-restore.yml +++ b/ansible/prometheus-restore.yml @@ -6,9 +6,6 @@ prometheus_service_name: "monitor_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: @@ -22,9 +19,6 @@ prometheus_service_name: "prometheus_fed_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: @@ -38,9 +32,6 @@ prometheus_service_name: "monitor_stateful_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - 
AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: diff --git a/ansible/redis-backup.yml b/ansible/redis-backup.yml index af7b1564ed..72ab28e584 100644 --- a/ansible/redis-backup.yml +++ b/ansible/redis-backup.yml @@ -3,9 +3,6 @@ gather_facts: false vars_files: - ['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - redis-backup run_once: true diff --git a/ansible/roles/artifacts-download-azure/tasks/main.yml b/ansible/roles/artifacts-download-azure/tasks/main.yml deleted file mode 100644 index db79bc213f..0000000000 --- a/ansible/roles/artifacts-download-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container exists --name {{ artifacts_container }} - -- name: Download from azure blob storage - command: az storage blob download -c {{ artifacts_container }} --name {{ artifact }} -f {{ artifact_path }} - async: 3600 - poll: 10 diff --git a/ansible/roles/artifacts-upload-azure/tasks/main.yml b/ansible/roles/artifacts-upload-azure/tasks/main.yml deleted file mode 100644 index 785dc1a455..0000000000 --- a/ansible/roles/artifacts-upload-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ artifacts_container }} - -- name: Upload to azure blob storage - command: az storage blob upload -c {{ artifacts_container }} --name {{ artifact }} -f {{ artifact_path }} - async: 3600 - poll: 10 diff --git a/ansible/roles/assets-upload-azure/defaults/main.yml b/ansible/roles/assets-upload-azure/defaults/main.yml deleted file mode 100644 index 13cc322514..0000000000 --- a/ansible/roles/assets-upload-azure/defaults/main.yml +++ /dev/null @@ 
-1,5 +0,0 @@ -delete: True -player_cdn_container: -player_cdn_account: -player_cdn_account_key: -assets: diff --git a/ansible/roles/assets-upload-azure/tasks/main.yml b/ansible/roles/assets-upload-azure/tasks/main.yml deleted file mode 100755 index 520641f5e3..0000000000 --- a/ansible/roles/assets-upload-azure/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Deleting container before Uploding assets - command: az storage blob delete-batch -s {{player_cdn_container}} - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - async: 3600 - poll: 10 - -- name: Ensure azure blob storage container exists - command: az storage container create --name {{player_cdn_container}} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - - -# Upload the assets created by the job to azure -- name: Upload to azure blob storage - command: az storage blob upload-batch -d {{player_cdn_container}} -s {{assets}} - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - async: 3600 - poll: 10 diff --git a/ansible/roles/azure-cli/tasks/main.yml b/ansible/roles/azure-cli/tasks/main.yml index 0a1148f2c4..0374f6a0ec 100644 --- a/ansible/roles/azure-cli/tasks/main.yml +++ b/ansible/roles/azure-cli/tasks/main.yml @@ -1,16 +1,16 @@ --- - name: Add Microsfot signing key - ansible.builtin.apt_key: + apt_key: url: https://packages.microsoft.com/keys/microsoft.asc state: present - name: Add Microsfot repository into sources list - ansible.builtin.apt_repository: + apt_repository: repo: "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ {{ ansible_distribution_release | lower }} main" state: present - name: Install azue cli and dependent packages - ansible.builtin.apt: + 
apt: pkg: - ca-certificates - curl diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..0e4e45bf95 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -0,0 +1,67 @@ +# The name of the blob container in the azure storage account +# Example - +# blob_container_name: "my-container" +blob_container_name: "" + +# The delete pattern to delete files and folder +# Example - +# blob_delete_pattern: "my-drectory/*" +# blob_delete_pattern: "my-drectory/another-directory/*" +# blob_delete_pattern: "*" +blob_delete_pattern: "" + +# The storage account name +# Example - +# storage_account_name: "sunbird-dev-public" +storage_account_name: "" + +# The storage account key +# Example - +# storage_account_name: "cmFuZG9tcmFuZG9tcmFuZG9tcmFuZG9tCg==" +storage_account_key: "" + +# The path to local file which has to be uploaded to azure storage +# The local path to store the file after downloading from azure storage +# Example - +# local_file_or_folder_path: "/workspace/my-folder/myfile.json" +# local_file_or_folder_path: "/workspace/my-folder" +local_file_or_folder_path: "" + +# The name of the file in azure storage after uploading from local +# The name of the file in azure storage that has to be downloaded +# Example - +# blob_file_name: "myfile-blob.json" +# You can also pass folder path in order to upload / download the file from a speciic folder +# blob_file_name "my-folder/my-file.json" +blob_file_name: "" + +# The storage account sas token +# Example - +# storage_account_sas_token: "?sv=2022-01-01&ss=abc&srt=rws%3D" +storage_account_sas_token: "" + +# The folder path in azure storage to upload the files starting from the root of the container +# This path should alwasy start with a slash / as we are going to append this value as shown in below example +# Example - +# blob_container_name: "my-container" +# blob_container_folder_path: "/my-folder-path" 
+# {{ blob_container_name }}{{ blob_container_folder_path }} +# The above translates to "my-container/my-folder-path" + +# The variable can also be empty as shown below, which means we will upload directly at the root path of the container +# Example - +# blob_container_name: "my-container" +# blob_container_folder_path: "" +# The above translates to "my-container" +blob_container_folder_path: "" + +# At what access level the container should be created +# Example - +# container_public_access: "off" +# container_public_access: "blob" +# container_public_access: "container" +# Allowed values are - off, blob, container +# This variable affects only new containers and has no affect on a container if it already exists +# If the container already exists, the access level will not be changed +# You will need to change the access level from Azure portal or using az storage container set-permission command +container_public_access: "" \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml new file mode 100644 index 0000000000..4e8ad68a2d --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders from a blob container recursively + shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-download.yml b/ansible/roles/azure-cloud-storage/tasks/blob-download.yml new file mode 100644 index 0000000000..3bbf4b607a --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-download.yml @@ -0,0 +1,5 @@ +--- +- name: download a file from azure storage + shell: "az storage blob download --container-name {{ blob_container_name }} --file {{ 
local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml new file mode 100644 index 0000000000..3043da46cc --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -0,0 +1,10 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload files and folders from a local directory to azure storage container + shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml new file mode 100644 index 0000000000..4b493ffb73 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -0,0 +1,10 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload file to azure storage container + shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/container-create.yml b/ansible/roles/azure-cloud-storage/tasks/container-create.yml new file mode 100644 index 0000000000..419510cc19 --- /dev/null +++ 
b/ansible/roles/azure-cloud-storage/tasks/container-create.yml @@ -0,0 +1,8 @@ +--- +- name: create container in azure storage if it doesn't exist + shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + when: storage_account_key | length > 0 + +- name: create container in azure storage if it doesn't exist + shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --sas-token '{{ storage_account_sas_token }}'" + when: storage_account_sas_token | length > 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml new file mode 100644 index 0000000000..236169e86c --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml @@ -0,0 +1,7 @@ +--- +- name: delete files and folders from azure storage using azcopy + shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + environment: + AZCOPY_CONCURRENT_FILES: "10" + async: 10800 + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/main.yml b/ansible/roles/azure-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..eb435ecfe2 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: delete files and folders from azure storage container recursively + include: blob-delete-batch.yml + +- name: download a file from azure storage + include: blob-download.yml + +- name: upload files and folders from a local directory to azure storage container + include: blob-upload-batch.yml + +- name: upload file to azure storage container + include: blob-upload.yml + +- name: 
create container in azure storage if it doesn't exist + include: container-create.yml + +- name: delete files and folders from azure storage using azcopy + include: delete-using-azcopy.yml + +- name: upload files and folders to azure storage using azcopy + include: upload-using-azcopy.yml diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml new file mode 100644 index 0000000000..99ab3c2bf8 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -0,0 +1,12 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload files and folders to azure storage using azcopy + shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + environment: + AZCOPY_CONCURRENT_FILES: "10" + async: 10800 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/blob-batch-delete-azure/tasks/main.yml b/ansible/roles/blob-batch-delete-azure/tasks/main.yml deleted file mode 100755 index 4d84085ed5..0000000000 --- a/ansible/roles/blob-batch-delete-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Delete the assets in container -- name: Upload to azure blob storage - command: az storage blob delete-batch -s {{container}} --pattern {{blob_pattern}} --dryrun - environment: - AZURE_STORAGE_ACCOUNT: "{{blob_account}}" - AZURE_STORAGE_KEY: "{{blob_account_key}}" - async: 60 - poll: 10 diff --git a/ansible/roles/cassandra-backup/meta/main.yml b/ansible/roles/cassandra-backup/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/cassandra-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git 
a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index a6611c8ebf..d6365315d6 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -3,11 +3,11 @@ ignore_errors: true - name: Create the directory - become: yes + become: true file: path=/data/cassandra/backup state=directory recurse=yes - name: copy the backup script - become: yes + become: true template: src: ../../../../deploy/cassandra_backup.py dest: /data/cassandra/backup/cassandra_backup.py @@ -17,7 +17,7 @@ cassandra_backup_gzip_file_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" - name: run the backup script - become: yes + become: true shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_gzip_file_name }}" --snapshotdirectory "{{ cassandra_backup_gzip_file_name }}" "{{additional_arguments|d('')}}" args: chdir: /data/cassandra/backup @@ -32,21 +32,17 @@ debug: var: doc_data -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ cassandra_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: "azcopy copy {{cassandra_backup_gzip_file_name}} 'https://{{sunbird_management_storage_account_name}}.blob.core.windows.net/{{cassandra_backup_azure_container_name}}{{sunbird_management_storage_account_sas}}' --recursive" - environment: - AZCOPY_CONCURRENT_FILES: 10 # How many files azcopy should read concurrently. 
- args: - chdir: /data/cassandra/backup - async: 10800 - poll: 10 - +- name: upload file to azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: upload-using-azcopy.yml + vars: + blob_container_name: "{{ cassandra_backup_azure_container_name }}" + container_public_access: "off" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_sas_token: "{{ azure_management_storage_account_sas }}" + when: cloud_service_provider == "azure" + - name: clean up backup dir after upload file: path="{{ cassandra_backup_dir }}" state=absent diff --git a/ansible/roles/cassandra-restore/meta/main.yml b/ansible/roles/cassandra-restore/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/cassandra-restore/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 6dcb7d97df..80b8f86863 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -1,52 +1,53 @@ - name: Stop the cassandra - become: yes + become: true service: name=cassandra state=stopped - set_fact: cassandra_restore_gzip_file_path: "{{ cassandra_restore_dir }}/{{ cassandra_restore_gzip_file_name }}" - -- name: Download to azure blob storage - command: az storage blob download --name {{ cassandra_restore_gzip_file_name }} --file {{ cassandra_restore_gzip_file_path }} --container-name {{ cassandra_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 - + +- name: download a file from azure storage + become: true + include_role: + name: azure-cloud-storage + tasks_from: 
blob-download.yml + vars: + blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_file_name: "{{ cassandra_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" + - name: unarchieve restore artifact - become: yes + become: true unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no - name: Remove the restroe artefact - become: yes + become: true file: path={{user_home}}/cassandra* state=absent - name: Remove the old data - become: yes + become: true file: path=/var/lib/cassandra/data/sunbird state=absent - name: Replace the new data - become: yes + become: true command: mv {{user_home}}/data/sunbird /var/lib/cassandra/data/ - - name: remove data - become: yes + become: true file: path: "/home/{{ ansible_ssh_user }}/data" state: absent - name: change the permissions - become: yes + become: true file: path=/var/lib/cassandra/data owner=cassandra group=cassandra recurse=yes - name: copy the backup script - become: yes + become: true template: src=nodetool.j2 dest={{user_home}}/nodetool.sh mode=0755 - - name: Start the cassandra - become: yes + become: true service: name=cassandra state=started - - diff --git a/ansible/roles/cert-file-upload/defaults/main.yml b/ansible/roles/cert-file-upload/defaults/main.yml deleted file mode 100644 index b74bef14c9..0000000000 --- a/ansible/roles/cert-file-upload/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -file_name: "certUploadedfile" -cert_template_artifacts_container_name: "e-credentials" diff --git a/ansible/roles/cert-file-upload/tasks/main.yml b/ansible/roles/cert-file-upload/tasks/main.yml deleted file mode 100644 index 9fcaf6a679..0000000000 --- a/ansible/roles/cert-file-upload/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ -- name: rename the 
file to the correct name as required in blobstore - command: "mv /tmp/certUploadedfile /tmp/{{file_name}}" - -- name: Upload to azure blob storage - command: "az storage blob upload -c {{ cert_template_artifacts_container_name }} --name {{ file_name }} -f /tmp/{{file_name}}" - async: 3600 - poll: 10 diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index b41aaef0a8..321a91f139 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -31,21 +31,18 @@ chdir: "{{cert_location}}/cert-templates/certUtilScripts/" when: createPublicKey is defined -- name: Ensure azure blob storage container exists - command: az storage container create --name {{cert_service_container_name}} --public-access off - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_private_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_private_storage_account_key}}" - -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{cert_service_container_name}} --source "out" - args: - chdir: "{{cert_location}}/cert-templates/certUtilScripts/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_private_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_private_storage_account_key}}" - async: 60 - poll: 10 +- name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ cert_service_container_name }}" + container_public_access: "off" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + storage_account_name: "{{ azure_private_storage_account_name }}" + storage_account_key: "{{ azure_private_storage_account_key }}" + when: cloud_service_provider == "azure" - name: list all the files shell: "ls -lR {{cert_location}}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml 
b/ansible/roles/desktop-deploy/tasks/main.yml index 0d301fbed0..963189ad4d 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -49,28 +49,28 @@ - name: run the installer script shell: "bash -x {{offline_repo_location}}/build.sh" -- name: Ensure azure blob storage container exists - command: az storage container create --name {{offline_installer_container_name}} --public-access blob - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" +- name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ offline_installer_container_name }}" + container_public_access: "blob" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{offline_installer_container_name}} --source "desktop_uploader_assets" - args: - chdir: "{{offline_repo_location}}/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 - -- name: Upload to latest.json file to blob - command: az storage blob upload-batch --destination "{{offline_installer_container_name}}/latest" --source "{{offline_repo_location}}/desktop_uploader_assets/{{time}}/" - args: - chdir: "{{offline_repo_location}}/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + 
- name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 99787b0f89..9072442956 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -10,4 +10,4 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_azure_backup_container_name: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index d2880013d5..2b7f23e576 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -4,11 +4,15 @@ - set_fact: snapshot_number="snapshot_{{ lookup('pipe','date +%s') }}" -- name: Ensure backup folder exists in azure blob - shell: "az storage container create --name {{ es_azure_backup_folder_name }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ azure_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ azure_management_storage_account_key }}" +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + vars: + blob_container_name: "{{ es_azure_backup_container_name }}" + container_public_access: "off" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" - name: Create Azure Repository uri: diff --git a/ansible/roles/grafana-backup/meta/main.yml 
b/ansible/roles/grafana-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/grafana-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 62d4c4dfe2..786bd13442 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -19,14 +19,18 @@ async: 3600 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ grafana_backup_azure_container_name }} - ignore_errors: true - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ grafana_backup_gzip_file_name }} --file {{ grafana_backup_gzip_file_path }} --container-name {{ grafana_backup_azure_container_name }} - async: 5000 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ grafana_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ grafana_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ grafana_backup_dir }}" state=absent diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index f5ec21c7bb..e430e57c1c 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -12,11 +12,15 @@ - name: Create archive of backup directory archive: path="{{ jenkins_backup_base_dir }}/{{ LATEST_BACKUP_DIR.stdout }}" dest="/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" format=zip -- name: Ensure azure blob 
storage container exists - command: az storage container create --name {{ jenkins_backup_azure_container_name }} - -- name: Upload to azure blob storage - command: az storage blob upload -c {{ jenkins_backup_azure_container_name }} --name "{{ LATEST_BACKUP_DIR.stdout }}.zip" -f "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - async: 3600 - poll: 10 - +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ jenkins_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/roles/mongodb-backup/meta/main.yml b/ansible/roles/mongodb-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/mongodb-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 219ced55ea..4235e52c32 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create the directory file: path={{ mongo_backup_dir }} state=directory recurse=yes @@ -13,13 +14,18 @@ - name: Compress the backup file shell: "tar -czf {{ mongo_backup_file_path }}.tar.gz {{ mongo_backup_file_path }}" -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ mongo_backup_file_name }}.tar.gz" - artifact_path: "{{ mongo_backup_file_path }}.tar.gz" - artifacts_container: "{{ mongo_backup_azure_container_name }}" + 
blob_container_name: "{{ mongo_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path={{ mongo_backup_dir }} state=absent diff --git a/ansible/roles/offline-installer/tasks/main.yml b/ansible/roles/offline-installer/tasks/main.yml index 31bc47f627..e110fd3116 100644 --- a/ansible/roles/offline-installer/tasks/main.yml +++ b/ansible/roles/offline-installer/tasks/main.yml @@ -41,7 +41,7 @@ when: uploadInstaller is not defined - name: upload to azure - include: uploadToAzure.yml + include: upload_to_storage.yml when: uploadInstaller is defined - name: Delete offline installer folder if any issue diff --git a/ansible/roles/offline-installer/tasks/uploadToAzure.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml similarity index 67% rename from ansible/roles/offline-installer/tasks/uploadToAzure.yml rename to ansible/roles/offline-installer/tasks/upload_to_storage.yml index 62d4378d10..3c62d6fc03 100644 --- a/ansible/roles/offline-installer/tasks/uploadToAzure.yml +++ b/ansible/roles/offline-installer/tasks/upload_to_storage.yml @@ -1,9 +1,4 @@ -- name: Ensure azure blob storage container exists - command: az storage container create --name {{offline_installer_container_name}} --public-access blob - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - +--- - name: Get the environment name for the artifact name shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.name'" register: env_name @@ -56,25 +51,31 @@ - artifacts.sh - metadata.sh -- name: Upload to azure blob storage - 
command: az storage blob upload-batch --destination {{offline_installer_container_name}} --source "offline_artifacts" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 - -- name: Upload to latest.json file to blob - command: az storage blob upload-batch --destination "{{offline_installer_container_name}}/latest" --source "{{folderName.stdout}}" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 +- name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ offline_installer_container_name }}" + container_public_access: "blob" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts/{{ folderName.stdout }}" + when: cloud_service_provider == "azure" - name: Create a zip of the folder to archieve the artifact archive: diff --git a/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml b/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml deleted file mode 100644 index 
5927f82724..0000000000 --- a/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - #- azure-cli \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml index cc5ede8e0e..a64f3639af 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml @@ -11,7 +11,6 @@ - set_fact: postgresql_backup_gzip_file_path: "{{ postgresql_backup_dir }}/{{ postgresql_backup_gzip_file_name }}.zip" - - name: Dump an existing database to a file postgresql_db: login_user: "{{ sunbird_pg_user }}" @@ -42,20 +41,18 @@ async: 500 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ postgresql_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ postgresql_backup_gzip_file_name }}.zip --file {{ postgresql_backup_gzip_file_path }} --container-name {{ postgresql_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ postgresql_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ 
azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml index ba413e7943..135c29280c 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml @@ -9,12 +9,17 @@ - set_fact: postgres_backup_filepath: "{{ postgresql_restore_dir }}/{{ postgres_backup_filename }}" -- name: Download backup from azure - command: az storage blob download -c {{ postgres_backup_azure_container_name }} --name {{ postgres_backup_filename }} -f {{ postgres_backup_filepath }} - args: - chdir: "{{ postgres_restore_dir }}" - async: 100 - poll: 10 +- name: download a file from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ postgres_backup_azure_container_name }}" + blob_file_name: "{{ postgres_backup_filename }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: unarchive artifact unarchive: src={{ postgresql_restore_dir }}/{{ postgres_backup_filename }} dest={{ postgresql_restore_dir }}/ copy=no diff --git a/ansible/roles/postgresql-backup/meta/main.yml b/ansible/roles/postgresql-backup/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/postgresql-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index c71f15510f..2e25619a10 100644 
--- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -13,20 +13,18 @@ async: 3600 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ postgresql_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ postgresql_backup_gzip_file_name }} --file {{ postgresql_backup_gzip_file_path }} --container-name {{ postgresql_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ postgresql_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ postgresql_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload - file: path="{{ postgresql_backup_dir }}" state=absent + file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/meta/main.yml b/ansible/roles/postgresql-restore/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/postgresql-restore/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml 
b/ansible/roles/postgresql-restore/tasks/main.yml index 47f9aa0f05..4075baa596 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -4,13 +4,17 @@ - set_fact: postgresql_restore_gzip_file_path: "{{ postgresql_restore_dir }}/{{ postgresql_restore_gzip_file_name }}" -- name: Download restore file from azure - command: az storage blob download --container-name {{ postgresql_restore_azure_container_name }} --name {{ postgresql_restore_gzip_file_name }} --file {{ postgresql_restore_gzip_file_path }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ postgresql_restore_azure_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ postgresql_restore_azure_storage_access_key }}" - async: 3600 - poll: 10 +- name: download a file from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ postgresql_restore_azure_container_name }}" + blob_file_name: "{{ postgresql_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: ensure postgresql service is stopped service: name=postgresql state=stopped diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index af34edddad..6286f31ebb 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -16,13 +16,18 @@ path: "{{ prometheus_data_dir }}/snapshots/{{ snapshot_name }}" dest: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ prometheus_backup_prefix }}_{{ 
snapshot_name }}.tar.gz" - artifact_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - artifacts_container: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: Deleting snapshot file: diff --git a/ansible/roles/prometheus-backup/meta/main.yml b/ansible/roles/prometheus-backup/meta/main.yml deleted file mode 100644 index bb605fa878..0000000000 --- a/ansible/roles/prometheus-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 712dd6faf3..1a71443c28 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -25,20 +25,18 @@ shell: "docker service scale monitor_prometheus=1" delegate_to: "{{groups['swarm-bootstrap-manager'][0]}}" -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ prometheus_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ prometheus_backup_gzip_file_name }} --file {{ prometheus_backup_gzip_file_path }} --container-name {{ prometheus_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ 
sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ prometheus_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ prometheus_backup_dir }}" state=absent diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 686d60e195..9b2a176882 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -1,20 +1,25 @@ - name: ensure backup dir exists file: path="{{ prometheus_backup_dir }}" state=directory -- name: Download backup from azure - command: az storage blob download -c {{ prometheus_backup_azure_container_name }} --name {{ prometheus_backup_filename }} -f {{ prometheus_backup_filepath }} - args: - chdir: "{{ prometheus_backup_dir }}" - async: 100 - poll: 10 +- name: download a file from azure storage + become: true + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_file_name: "{{ prometheus_backup_filename }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: ensure prometheus is stopped shell: "docker service scale {{prometheus_service_name}}=0 && sleep 10" delegate_to: 
"{{manager_host}}" #variable is passed as extra vars from jenkins - - name: Unarchive backup - become: yes + become: true unarchive: src: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filename }}" dest: "{{prometheus_data_dir}}/" @@ -29,4 +34,3 @@ - name: clean up backup dir file: path="{{ prometheus_backup_dir }}" state=absent - diff --git a/ansible/roles/redis-backup/meta/main.yml b/ansible/roles/redis-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/redis-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 3519bb1ea9..fa621b4d6d 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -1,6 +1,5 @@ - name: Create the directory file: path={{ redis_backup_dir }} state=directory recurse=yes - - set_fact: redis_backup_file_name: "redis-backup-{{ lookup('pipe', 'date +%Y-%m-%d-%T') }}.rdb" @@ -8,22 +7,25 @@ - set_fact: redis_backup_file_path: "{{ redis_backup_dir }}/{{ redis_backup_file_name }}" - - name: copy dump.rdb file copy: src: /home/learning/redis-stable/dump.rdb dest: "{{ redis_backup_dir }}/{{ redis_backup_file_name }}" remote_src: yes - -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ redis_backup_file_name }}" - artifact_path: "{{ redis_backup_file_path }}" - artifacts_container: "{{ nodebb_redis_backup_azure_container_name }}" - + blob_container_name: "{{ nodebb_redis_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ redis_backup_file_name }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: 
cloud_service_provider == "azure" + - name: clean up backup dir after upload file: path={{ redis_backup_dir }} state=absent diff --git a/ansible/roles/upload-batch/tasks/main.yml b/ansible/roles/upload-batch/tasks/main.yml deleted file mode 100644 index 1055bdb7f4..0000000000 --- a/ansible/roles/upload-batch/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ container_name }} --public-access container - -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{ destination_path }} --source {{ source_path }} - async: 3600 - poll: 10 diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 1cc8fdbe8f..a4da2d4ede 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -1,47 +1,34 @@ - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - container_name: "{{ blob_container }}" - destination_path: "{{ blob_container }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" tasks: - - name: upload files - include_role: - name: upload-batch - vars: - source_path: "{{ playbook_dir }}/../utils/{{ item }}" - with_items: - - "{{ source_folder.split(',') }}" + - name: template schema files + template: + src: "{{ item }}" + dest: "{{ item }}" + with_fileglob: + - "{{ playbook_dir }}/../utils/sunbird-RC/schema/*.json" tags: - - upload-faqs - + - upload-RC-schema + - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - container_name: "{{ blob_container }}" - destination_path: "{{ blob_container }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" tasks: - - name: template schema files - template: - src: "{{ item }}" - dest: "{{ item }}" - with_fileglob: - - "{{ 
playbook_dir }}/../utils/sunbird-RC/schema/*.json" - - name: upload files - include_role: - name: upload-batch - vars: - source_path: "{{ playbook_dir }}/../utils/{{ item }}" - with_items: - - "{{ source_folder.split(',') }}" + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ blob_container }}" + container_public_access: "container" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "azure" tags: - - upload-RC-schema + - upload-faqs + - upload-RC-schema diff --git a/pipelines/certs-templates/Jenkinsfile.upload b/pipelines/certs-templates/Jenkinsfile.upload deleted file mode 100644 index 8b0ef0a6bd..0000000000 --- a/pipelines/certs-templates/Jenkinsfile.upload +++ /dev/null @@ -1,61 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage("upload") { - def inputFile = input message: 'Upload file', parameters: [file(name: 'certUploadedfile')] - new hudson.FilePath(new File("tmp/certUploadedfile")).copyFrom(inputFile) - } - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - stage('deploy'){ - values = [:] - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/cert-file-upload.yml" - ansibleExtraArgs = "--extra-vars \"file_name=${params.file_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = "SUCCESS" - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - - stage('remove tmp file'){ - sh """ - rm -rf /tmp/certUploadedfile - """ - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} diff --git a/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile b/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile index b8173de1a0..9d4521b956 100644 --- a/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile +++ b/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile @@ -34,8 +34,8 @@ node() { chmod a+x content-plugins/az_copy.sh mv content-plugins ansible """ - ansiblePlaybook = "${currentWs}/ansible/plugin.yml" - ansibleExtraArgs = "--tags org_sunbird_questionunit_quml --extra-vars \" plugins_name=${params.plugin_name} source_file=${currentWs}/ansible/content-plugins/${params.plugin_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansiblePlaybook = 
"${currentWs}/ansible/plugins.yml" + ansibleExtraArgs = "--extra-vars \" plugins_name=${params.plugin_name} source_file=${currentWs}/ansible/content-plugins/${params.plugin_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index e54937985e..fc439777b4 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -18,15 +18,20 @@ mail_server_username: "apikey" # Email provider userna sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should be as from address in mails # List of mail ids to whome the monitoring alerts should be sent. alerts_mailing_list : "devops@myorg.com" # Comma separated mail list for Alerts; eg: user1@mail.com, user2@mail.com -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) + # Define the below if you are using Azure Cloud # Management Storage Account +# Note - You can use the same azure account for the below variables or have separate azure accounts +sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) 
+sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) +sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) +sunbird_artifact_storage_account_name: "{{ sunbird_management_storage_account_name }}" # Azure account name for storing artifacts data (like jenkins build zip files) + +azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" +azure_private_storage_account_name: "{{ sunbird_private_storage_account_name }}" azure_management_storage_account_name: "{{ sunbird_management_storage_account_name }}" +azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud # Management Storage Bucket diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 798aceb7e0..c373fa8c4f 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,27 +8,33 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" core_vault_docker_registry_password: "change.docker.password" +# Define the below if you are using Azure Cloud +# Management Storage Account # Run the below command in shell # date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' # sas_token=?`az storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` # generate a sas for the blob for entire storage accout with write and read access -artifact_azure_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command 
sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command sunbird_public_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command +sunbird_management_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command + sunbird_public_storage_account_key: "change.azure.storage.account.key" sunbird_private_storage_account_key: "change.azure.storage.account.key" sunbird_management_storage_account_key: "change.azure.storage.account.key" +sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" -# Define the below if you are using Azure Cloud -# Management Storage Account +azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" +azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" +azure_artifact_storage_account_key: "{{ sunbird_artifact_storage_account_key }}" +azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" +azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud # Management Storage Bucket aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" - # Define the below if you are using Google Cloud # Management Storage Bucket gcs_management_bucket_service_account: | From 85f8e5c20c9339cfbb8e0e39d2100cc5763b1807 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 21 Sep 2022 14:36:30 +0530 Subject: [PATCH 004/203] fix: adding mandatore var cloud_service_provider Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index fc439777b4..8277399b44 100644 --- 
a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -1,6 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # # ------------------------------------------------------------------------------------------------------------ # +cloud_service_provider: "" # Your cloud service provider name. Supported values are aws, azure, gcloud domain_name: "" # your domain name like example.com dockerhub: "change.docker.url" # docker hub username or url incase of private registry # This ip should be in the kubenetes subnet range. For example, if your kube cluster is running in `10.0.0.0/24, then it can be 10.0.0.5. Make sure this ip is not allocated to any other things.` From 847f536094008a2e7fef2bf994239a1c2e048ea1 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 23 Sep 2022 19:20:40 +0530 Subject: [PATCH 005/203] feat: modified vars for generic naming convention Signed-off-by: Keshav Prasad --- ansible/assets-upload.yml | 9 ++++++++- ansible/deploy-plugins.yml | 15 +++++++++++---- ansible/desktop-faq-upload.yml | 9 ++++++++- ansible/dial_upload-schema.yml | 9 ++++++++- ansible/kp_upload-schema.yml | 9 ++++++++- ansible/nodebbui-upload.yml | 11 +++++++++-- ansible/plugins.yml | 7 +++++++ ansible/roles/cassandra-backup/defaults/main.yml | 9 ++++++++- ansible/roles/cassandra-backup/tasks/main.yml | 2 +- ansible/roles/cassandra-restore/defaults/main.yml | 7 +++++++ ansible/roles/cassandra-restore/tasks/main.yml | 2 +- ansible/roles/cert-templates/defaults/main.yml | 7 +++++++ ansible/roles/cert-templates/tasks/main.yml | 2 +- ansible/roles/desktop-deploy/defaults/main.yml | 7 +++++++ ansible/roles/desktop-deploy/tasks/main.yml | 2 +- ansible/roles/es-azure-snapshot/defaults/main.yml | 9 ++++++++- ansible/roles/es-azure-snapshot/tasks/main.yml | 2 +- ansible/roles/es-gcs-snapshot/defaults/main.yml | 
4 ++-- ansible/roles/es-s3-snapshot/defaults/main.yml | 4 ++-- ansible/roles/grafana-backup/defaults/main.yml | 7 +++++++ ansible/roles/grafana-backup/tasks/main.yml | 2 +- .../roles/jenkins-backup-upload/defaults/main.yml | 7 +++++++ .../roles/jenkins-backup-upload/tasks/main.yml | 2 +- ansible/roles/mongodb-backup/defaults/main.yml | 7 +++++++ ansible/roles/mongodb-backup/tasks/main.yml | 2 +- ansible/roles/offline-installer/defaults/main.yml | 7 +++++++ .../offline-installer/tasks/upload_to_storage.yml | 2 +- .../defaults/main.yml | 7 +++++++ .../tasks/main.yml | 2 +- .../defaults/main.yml | 7 +++++++ .../tasks/main.yml | 2 +- ansible/roles/postgresql-backup/defaults/main.yml | 7 ++++++- ansible/roles/postgresql-backup/tasks/main.yml | 2 +- .../roles/postgresql-restore/defaults/main.yml | 11 ++++++----- ansible/roles/postgresql-restore/tasks/main.yml | 2 +- .../roles/prometheus-backup-v2/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-backup-v2/tasks/main.yml | 2 +- ansible/roles/prometheus-backup/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-backup/tasks/main.yml | 2 +- .../roles/prometheus-restore/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-restore/tasks/main.yml | 2 +- ansible/roles/redis-backup/defaults/main.yml | 7 +++++++ ansible/roles/redis-backup/tasks/main.yml | 2 +- ansible/uploadFAQs.yml | 2 +- .../jobs/Kubernetes/jobs/UploadFAQs/config.xml | 2 +- .../Sunbird-RC/jobs/Upload_RC_Schema/config.xml | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- 47 files changed, 206 insertions(+), 46 deletions(-) diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 8bc0ac9123..db14234e4a 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -2,10 +2,17 @@ - hosts: localhost vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. 
In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. After few releases, we will remove the older variables and use only the new variables across the repos + vars: + player_cdn_storage: "{{ player_cdn_container }}" tasks: - name: set common azure variables set_fact: - blob_container_name: "{{ player_cdn_container }}" + blob_container_name: "{{ player_cdn_storage }}" container_public_access: "container" blob_container_folder_path: "" storage_account_name: "{{ azure_public_storage_account_name }}" diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 7c4958a5f5..d1a0be8796 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -3,11 +3,18 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_storage: "{{ plugin_container_name }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | - echo "{{sunbird_portal_preview_cdn_url}}" - sed -i 's|cdn_url|{{sunbird_portal_preview_cdn_url}}|g' "{{currentws}}"/ansible/preview/preview_cdn.html + echo "{{ sunbird_portal_preview_cdn_url }}" + sed -i 's|cdn_url|{{ sunbird_portal_preview_cdn_url }}|g' "{{ currentws }}"/ansible/preview/preview_cdn.html when: sunbird_portal_preview_cdn_url is defined tags: - preview @@ -16,7 +23,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_container_folder_path: "/{{ folder_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" @@ -62,7 +69,7 @@ - block: - name: run the az_copy.sh script - shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" + shell: "bash {{ az_file_path }} {{ plugin_storage }} {{ source_file }}" async: 3600 poll: 10 environment: diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 7c7e992039..43d1789b00 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,12 +1,19 @@ - hosts: localhost vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + desktop_container: "{{ desktop_container_storage }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_container }}" + blob_container_name: "{{ desktop_container_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 54b0672ed9..c846ecb95e 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -2,6 +2,13 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + dial_plugin_container_name: "{{ dial_plugin_storage }}" tasks: - name: Create directories file: @@ -24,7 +31,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ dial_plugin_container_name }}" + blob_container_name: "{{ dial_plugin_storage }}" container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 843abfbd19..c13633e8ab 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -2,13 +2,20 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_container_name: "{{ plugin_storage }}" tasks: - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 48f59dd327..809d67b914 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -3,13 +3,20 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + nodebbui_container_name: "{{ nodebbui_storage }}" tasks: - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml vars: - blob_container_name: "{{ nodebbui_container_name }}" + blob_container_name: "{{ nodebbui_storage }}" blob_container_folder_path: "" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" @@ -20,7 +27,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ nodebbui_container_name }}" + blob_container_name: "{{ nodebbui_storage }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 0245f1801a..487f5c780d 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -3,6 +3,13 @@ gather_facts: false vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_container_name: "{{ plugin_storage }}" tasks: - name: this block consists of tasks related to azure storage block: diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index 148bcf83b2..139fd1d810 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,3 +1,10 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' -cassandra_backup_azure_container_name: core-cassandra \ No newline at end of file +cassandra_backup_azure_container_name: core-cassandra + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index d6365315d6..ac0682c58a 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,7 +37,7 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_container_name: "{{ cassandra_backup_storage }}" container_public_access: "off" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/cassandra-restore/defaults/main.yml b/ansible/roles/cassandra-restore/defaults/main.yml index 6353e09287..4a4828144e 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ b/ansible/roles/cassandra-restore/defaults/main.yml @@ -1 +1,8 @@ user_home: "/home/{{ ansible_ssh_user }}/" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 80b8f86863..717e2fe113 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -11,7 +11,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_container_name: "{{ cassandra_backup_storage }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index c8710dd9d9..c621d6ddb8 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -2,3 +2,10 @@ certs_badge_upload_retry_count: 3 certs_badge_criteria: "" certs_badge_batch_id: "" certs_badge_key_id: "" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cert_service_storage: "{{ cert_service_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 321a91f139..dcbdeebadc 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -36,7 +36,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ cert_service_container_name }}" + blob_container_name: "{{ cert_service_storage }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index d71509fd05..ad3803dcd1 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,2 +1,9 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 963189ad4d..e7763604c1 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -53,7 +53,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_container_name }}" + blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 9072442956..f527096f18 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -10,4 +10,11 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_container_name: "elasticsearch-snapshots" \ No newline at end of file +es_azure_backup_container_name: "elasticsearch-snapshots" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +es_backup_storage: "{{ es_azure_backup_container_name }}" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index 2b7f23e576..e804b4344d 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: container-create.yml vars: - blob_container_name: "{{ es_azure_backup_container_name }}" + blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" diff --git a/ansible/roles/es-gcs-snapshot/defaults/main.yml b/ansible/roles/es-gcs-snapshot/defaults/main.yml index 3c2efa9a7f..5e3cbece6f 100644 --- a/ansible/roles/es-gcs-snapshot/defaults/main.yml +++ b/ansible/roles/es-gcs-snapshot/defaults/main.yml @@ -2,11 +2,11 @@ snapshot_create_request_body: { type: gcs, settings: { bucket: "{{ gcs_management_bucket_name }}", - base_path: "{{ es_gcs_backup_folder_name }}/{{ snapshot_base_path }}_{{ base_path_date }}" + base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_gcs_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/es-s3-snapshot/defaults/main.yml b/ansible/roles/es-s3-snapshot/defaults/main.yml index 2ca18929b4..7ddda6ebd0 100644 --- a/ansible/roles/es-s3-snapshot/defaults/main.yml +++ b/ansible/roles/es-s3-snapshot/defaults/main.yml @@ -2,11 +2,11 @@ snapshot_create_request_body: { type: s3, settings: { bucket: "{{ aws_management_bucket_name }}", - base_path: "{{ 
es_s3_backup_folder_name }}/{{ snapshot_base_path }}_{{ base_path_date }}" + base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_s3_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/grafana-backup/defaults/main.yml b/ansible/roles/grafana-backup/defaults/main.yml index b32dea6dde..fc62843964 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ b/ansible/roles/grafana-backup/defaults/main.yml @@ -5,3 +5,10 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' grafana_backup_azure_container_name: grafana-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 786bd13442..c898ada0d5 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -24,7 +24,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ grafana_backup_azure_container_name }}" + blob_container_name: "{{ grafana_backup_storage }}" container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index 18ab7e816d..40a231d3d5 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -3,3 +3,10 @@ jenkins_group: jenkins jenkins_backup_base_dir: /var/lib/jenkins/jenkins-backup jenkins_backup_azure_container_name: jenkins-backup jenkins_backup_max_delay_in_days: 1 + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index e430e57c1c..d003bed89f 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -17,7 +17,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ jenkins_backup_azure_container_name }}" + blob_container_name: "{{ jenkins_backup_storage }}" container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index 82a51650a5..d7b56ebefd 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,2 +1,9 @@ mongo_backup_dir: '/tmp/mongo-backup' mongo_backup_azure_container_name: "{{ mongo_backup_azure_container_name }}" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 4235e52c32..1eefe6b077 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -19,7 +19,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ mongo_backup_azure_container_name }}" + blob_container_name: "{{ mongo_backup_storage }}" container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" diff --git a/ansible/roles/offline-installer/defaults/main.yml b/ansible/roles/offline-installer/defaults/main.yml index d71509fd05..ad3803dcd1 100644 --- a/ansible/roles/offline-installer/defaults/main.yml +++ b/ansible/roles/offline-installer/defaults/main.yml @@ -1,2 +1,9 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/offline-installer/tasks/upload_to_storage.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml index 3c62d6fc03..b8a68ba164 100644 --- a/ansible/roles/offline-installer/tasks/upload_to_storage.yml +++ b/ansible/roles/offline-installer/tasks/upload_to_storage.yml @@ -55,7 +55,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_container_name }}" + blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" diff --git a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml index bf43091813..6e637bf3ce 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml @@ -8,3 +8,10 @@ db_name: postgres_admin_user: "{{sunbird_pg_user}}" postgres_hostname: "{{groups['postgresql-master-1'][0]}}" postgres_password: "{{postgres_password}}" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml index a64f3639af..a8261d91a3 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml @@ -46,7 +46,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_azure_container_name }}" + blob_container_name: "{{ postgresql_backup_storage }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml index 6a634e3bfd..4ac0d62151 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml @@ -12,3 +12,10 @@ postgres_user: postgres_password: postgres_hostname: postgres_env: + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml index 135c29280c..61b1fe3eca 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml @@ -14,7 +14,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgres_backup_azure_container_name }}" + blob_container_name: "{{ postgres_backup_storage }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index d64be512d8..f358e4f4f3 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -2,4 +2,9 @@ postgresql_backup_dir: /tmp/postgresql-backup postgresql_user: postgres postgresql_backup_azure_container_name: postgresql-backup -# Set these vars per environment as show in example below +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 2e25619a10..81ce384afa 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -18,7 +18,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_azure_container_name }}" + blob_container_name: "{{ postgresql_backup_storage }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 0c6b17f851..5f0708ed34 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -5,8 +5,9 @@ postgresql_cluster_version: 9.5 postgresql_cluster_name: main postgresql_restore_azure_container_name: postgresql-backup -# Set these vars per environment as show in example below -# postgresql_restore_azure_storage_account_name: ntpbackupsstaging - -# Pass the parameter -# postgresql_restore_gzip_file_name: +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index 4075baa596..b95eff5751 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgresql_restore_azure_container_name }}" + blob_container_name: "{{ postgresql_restore_storage }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index 0cd66df647..e3752a693f 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -1,3 +1,10 @@ --- # defaults file for ansible/roles/prometheus-backup-v2 -prometheus_backup_azure_container_name: prometheus-backup \ No newline at end of file +prometheus_backup_azure_container_name: prometheus-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 6286f31ebb..071ed395e1 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -21,7 +21,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index dd43fbf572..17425092ee 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -4,4 +4,11 @@ prometheus_backup_azure_container_name: prometheus-backup # Set these vars per environment as show in example below # Override these values in group_vars backup_storage_name: backups -backup_storage_key: '' \ No newline at end of file +backup_storage_key: '' + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 1a71443c28..f9aaa54073 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -30,7 +30,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index eba83809ca..bee405457a 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -1,2 +1,9 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup \ No newline at end of file +prometheus_backup_azure_container_name: prometheus-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 9b2a176882..40c9bd9225 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -7,7 +7,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index e00b84ce4b..9f6055682a 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -1,3 +1,10 @@ redis_backup_dir: /tmp/redis-backup nodebb_redis_backup_azure_container_name: nodebb-redis-backup learner_user: learning + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index fa621b4d6d..be66ea5292 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -18,7 +18,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ nodebb_redis_backup_azure_container_name }}" + blob_container_name: "{{ nodebb_redis_backup_storage }}" container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index a4da2d4ede..7109a65f68 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ blob_container }}" + blob_container_name: "{{ container_name }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index c476a3de0f..b4ae2238c9 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - blob_container + storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml index 997794c6dc..816c4f9f49 100644 --- 
a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - blob_container + storage diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index 00588a8dd7..ee68678781 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.blob_container} source_folder=${params.source_folder} destination_path=${params.blob_container}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From e4d9a4a255502a35658d7fa409f596b8848c095d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 23 Sep 2022 19:25:41 +0530 Subject: [PATCH 006/203] fix: modified vars to use generic names Signed-off-by: Keshav Prasad --- ansible/uploadFAQs.yml | 2 +- .../Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml | 2 +- .../jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 7109a65f68..8447fe4e47 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ container_name }}" + 
blob_container_name: "{{ upload_storage }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index b4ae2238c9..85b7c81efb 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - storage + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml index 816c4f9f49..ea47b8d14e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - storage + upload_storage diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index ee68678781..4f18801b4e 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"upload_storage=${params.upload_storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) 
values.put('env', envDir) values.put('module', module) From 89877d8c217c9e0b1b05611fcfaf4bbfe8114222 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 18:17:11 +0530 Subject: [PATCH 007/203] fix: incorrect variable assignments Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 2 +- ansible/dial_upload-schema.yml | 2 +- ansible/kp_upload-schema.yml | 2 +- ansible/nodebbui-upload.yml | 2 +- ansible/plugins.yml | 4 ++-- ansible/roles/es-azure-snapshot/defaults/main.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 43d1789b00..c17f7e9b9a 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -7,7 +7,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - desktop_container: "{{ desktop_container_storage }}" + desktop_container_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index c846ecb95e..ba7abf627b 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -8,7 +8,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - dial_plugin_container_name: "{{ dial_plugin_storage }}" + dial_plugin_storage: "{{ dial_plugin_container_name }}" tasks: - name: Create directories file: diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index c13633e8ab..7d7163437b 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -8,7 +8,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - plugin_container_name: "{{ plugin_storage }}" + plugin_storage: "{{ plugin_container_name }}" tasks: - name: upload batch of files to azure storage include_role: diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 809d67b914..3c0bf414ae 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -9,7 +9,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - nodebbui_container_name: "{{ nodebbui_storage }}" + nodebbui_storage: "{{ nodebbui_container_name }}" tasks: - name: delete files and folders from azure storage using azcopy include_role: diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 487f5c780d..35e34578d0 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -9,13 +9,13 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - plugin_container_name: "{{ plugin_storage }}" + plugin_storage: "{{ plugin_container_name }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_delete_pattern: "content-plugins/{{ plugins_name }}" blob_container_folder_path: "/content-plugins/{{ plugins_name }}" diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index f527096f18..396746aa32 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -1,7 +1,7 @@ snapshot_create_request_body: { type: azure, settings: { - container: "{{ es_azure_backup_folder_name }}", + container: "{{ es_backup_storage }}", base_path: "{{ snapshot_base_path }}_{{ base_path_date }}" } } From 4c373b9d2d05eb3ea7e81b62fa4541333c9c5860 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 18:41:12 +0530 Subject: [PATCH 008/203] fix: moving comments section Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/secrets.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index c373fa8c4f..f157d85862 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,8 +8,6 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" core_vault_docker_registry_password: "change.docker.password" -# Define the below if you are using Azure Cloud -# Management Storage Account # Run the below command in shell # 
date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' # sas_token=?`az storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` @@ -23,6 +21,8 @@ sunbird_private_storage_account_key: "change.azure.storage.account.key" sunbird_management_storage_account_key: "change.azure.storage.account.key" sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" +# Define the below if you are using Azure Cloud +# Management Storage Account azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" From 516a76f92797a9e3529ebbc0520064862ca29ed0 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 19:09:40 +0530 Subject: [PATCH 009/203] fix: add tags for set_fact task Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 4 +++- ansible/desktop-faq-upload.yml | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index d1a0be8796..be7a6054be 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -29,12 +29,14 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + tags: + - always - block: - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage - tasks_from: delete-using-azcopy.yml + tasks_from: delete-using-azcopy.yml tags: - content-editor - collection-editor diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index c17f7e9b9a..02f29db01d 100644 --- 
a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -17,6 +17,8 @@ blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + tags: + - always - block: - name: upload file to azure storage From 99d47256e22674380a93bb9e9a3a753b4044f6a7 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 12:23:38 +0530 Subject: [PATCH 010/203] fix: invoke blob upload role, moved few vars Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index be7a6054be..ef3ea0b44a 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -25,7 +25,6 @@ set_fact: blob_container_name: "{{ plugin_storage }}" container_public_access: "container" - blob_container_folder_path: "/{{ folder_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" @@ -37,6 +36,8 @@ include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml + vars: + blob_container_folder_path: "/{{ folder_name }}" tags: - content-editor - collection-editor @@ -49,6 +50,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: + blob_container_folder_path: "/{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -62,7 +64,7 @@ - name: upload file to azure storage include_role: name: azure-cloud-storage - tasks_from: blob-upload-batch.yml + tasks_from: blob-upload.yml vars: blob_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" From d7d98695bd236685d24106c3b912d8624a30b660 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 12:48:52 
+0530 Subject: [PATCH 011/203] fix: adding build description Signed-off-by: Keshav Prasad --- pipelines/upload/chatbot/Jenkinsfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 6aaf6d0c39..70910ef261 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -47,6 +47,8 @@ node() { values.put('ansibleExtraArgs', ansibleExtraArgs) println values ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } summary() From 498dbf13d73b8bc9e941df0242d94408e7a4495f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 13:29:57 +0530 Subject: [PATCH 012/203] fix: updated jenkins job param for container path Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 4 ++-- .../dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 02f29db01d..a2789218c2 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -7,13 +7,13 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - desktop_container_storage: "{{ desktop_container }}" + desktop_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_container_storage }}" + blob_container_name: "{{ desktop_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index c0289cf30f..ec3dbfe716 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -102,7 +102,7 @@ return """<b>This parameter is not used</b>""" destination_path - chatbot/router/config + router/config false From 10dec83ed4999e809c7569ce63fd33cd22a11fc2 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 13:53:23 +0530 Subject: [PATCH 013/203] fix: updated jenkins jobs and params Signed-off-by: Keshav Prasad --- .../config.xml | 2 +- pipelines/upload/chatbot/Jenkinsfile | 5 +++++ pipelines/upload/portal-csv/Jenkinsfile | 7 +++++++ 3 files changed, 13 insertions(+), 1 deletion(-) rename deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/{Upload_CollectionHierarchy_CSV => UploadCollectionHierarchyCSV}/config.xml (99%) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml similarity index 99% rename from 
deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml rename to deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 4818cfc99b..314e1bcd74 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -125,7 +125,7 @@ return """<b>This parameter is not used</b>""" - sourcing/collection-hierarchy + collection-hierarchy diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 70910ef261..764e73c9ba 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -54,7 +54,12 @@ node() { summary() } catch (err) { + currentBuild.result = 'FAILURE' throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() } } diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index c8194f6694..19a0782e81 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -36,11 +36,18 @@ node() { values.put('ansibleExtraArgs', ansibleExtraArgs) println values ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } summary() } catch (err) { + currentBuild.result = 'FAILURE' throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() } } From d1afa413d1aaac161c13457d48db7ddb6e738651 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:09:06 +0530 Subject: [PATCH 014/203] fix: renamed jenkins parameters, added missing jenkins job Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 9 +- .../UploadCollectionHierarchyCSV/config.xml | 2 +- .../jobs/UploadChatbotConfig/config.xml | 2 +- 
.../UploadCollectionHierarchyCSV/config.xml | 2 +- .../jobs/UploadDiscussionUIDocs/config.xml | 244 ++++++++++++++++++ pipelines/deploy/desktop-faq/Jenkinsfile | 2 +- pipelines/upload/chatbot/Jenkinsfile | 2 +- pipelines/upload/discussion-UI/Jenkinsfile | 2 +- pipelines/upload/portal-csv/Jenkinsfile | 2 +- 9 files changed, 252 insertions(+), 15 deletions(-) create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index a2789218c2..0cdb89a07d 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,19 +1,12 @@ - hosts: localhost vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - desktop_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_storage }}" + blob_container_name: "{{ upload_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 0272d155c6..0236cab0eb 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -103,7 +103,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index ec3dbfe716..defc3a0ddd 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -85,7 +85,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 314e1bcd74..d87aac4ee3 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ 
b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -103,7 +103,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml new file mode 100644 index 0000000000..94f6f3a64d --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -0,0 +1,244 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/Core/DiscussionsUI + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Download the artifact from azure blob, JenkinsJob - Use the atrifact from Jenkins job.</b></font> + choice-parameter-9600649228560 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + PT_SINGLE_SELECT + false + 1 + + + build_number + + choice-parameter-9600651313765 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + artifact_source + ET_FORMATTED_HTML + true + + + artifact_version + + choice-parameter-9600653373369 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + artifact_source + ET_FORMATTED_HTML + true + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + ET_FORMATTED_HTML + true + + + upload_storage + + + + discussion-ui + + + + + 
source_path + + + + documentation + + + + + destination_path + + + + discussion-ui/documentation + + + + + tag + + + + upload-batch + + + + + + + 0 + 0 + + false + project + false + + + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/discussion-UI/Jenkinsfile + false + + + false + diff --git a/pipelines/deploy/desktop-faq/Jenkinsfile b/pipelines/deploy/desktop-faq/Jenkinsfile index 620c5c2f5f..d282ec2884 100644 --- a/pipelines/deploy/desktop-faq/Jenkinsfile +++ b/pipelines/deploy/desktop-faq/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.desktop_container} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 764e73c9ba..c97597c44c 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -38,7 +38,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} 
src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/discussion-UI/Jenkinsfile b/pipelines/upload/discussion-UI/Jenkinsfile index 4de3383796..c4d794fb3e 100644 --- a/pipelines/upload/discussion-UI/Jenkinsfile +++ b/pipelines/upload/discussion-UI/Jenkinsfile @@ -30,7 +30,7 @@ node() { unzip ${artifact} """ ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index 19a0782e81..6e8453d3e2 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -27,7 +27,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} 
src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 9887254ffcdb64601c2f190b408e34021b09525a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:38:00 +0530 Subject: [PATCH 015/203] fix: added missing jobs Signed-off-by: Keshav Prasad --- .../jobs/UploadDiscussionUIDocs/config.xml | 2 +- .../jobs/UploadPortalLabel/config.xml | 189 ++++++++++++++++++ 2 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml index 94f6f3a64d..a801645925 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -182,7 +182,7 @@ return """<b>This parameter is not used</b>""" - discussion-ui/documentation + documentation diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml new file mode 100644 index 0000000000..a75d9ee220 --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml @@ -0,0 +1,189 @@ + + + + false + + + + -1 + -1 + -1 + 5 + + + + + false + false + + + + + private_branch + + choice-parameter-2544395024638227 + 
1 + + true + + + + true + + + UploadPortalLabel + Deploy/dev/Kubernetes/UploadPortalLabel + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + UploadPortalLabel + Deploy/dev/Kubernetes/UploadPortalLabel + + + ET_FORMATTED_HTML + true + + + upload_storage + + + + label + + + + + destination_path + + + + all_labels_ta.json + all_labels_bn.json + all_labels_en.json + all_labels_hi.json + all_labels_kn.json + all_labels_mr.json + all_labels_te.json + all_labels_ur.json + all_labels_gu.json + all_labels_ml.json + all_labels_as.json + all_labels_or.json + + + + + src_file_path + + + + utils/portal/labels/all_labels_ta.json + utils/portal/labels/all_labels_bn.json + utils/portal/labels/all_labels_en.json + utils/portal/labels/all_labels_hi.json + utils/portal/labels/all_labels_kn.json + utils/portal/labels/all_labels_mr.json + utils/portal/labels/all_labels_te.json + utils/portal/labels/all_labels_ur.json + utils/portal/labels/all_labels_gu.json + utils/portal/labels/all_labels_ml.json + utils/portal/labels/all_labels_as.json + utils/portal/labels/all_labels_or.json + + + + + tag + + + + upload-label + + + + + + + 0 + 0 + + false + project + false + + + + + + + + + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/deploy/desktop-faq/Jenkinsfile + false + + + false + From 01975c547fdc37826bf51ffee0fb78caf4102ff9 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:48:38 +0530 Subject: [PATCH 016/203] fix: updated schema folder name Signed-off-by: Keshav Prasad --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 1a0216c740..44cf0bca8b 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ 
b/pipelines/upload/schema/dial/Jenkinsfile @@ -29,7 +29,7 @@ node() { git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 1406ea07d710241f779bbfe4a5cdbd651c5dfe7d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 19:36:38 +0530 Subject: [PATCH 017/203] fix: revert source folder changes Signed-off-by: Keshav Prasad --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 44cf0bca8b..dd74b2f23f 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -29,7 +29,7 @@ node() { git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 8c4d0297587c1931bf5c7f83c17bbe3b846a0e65 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 11:19:18 +0530 Subject: [PATCH 018/203] fix: renamed jobs, formatted lines, deleted unsued plays 
Signed-off-by: Keshav Prasad --- ansible/nodebbui-upload.yml | 36 ----------- .../config.xml | 0 pipelines/certs-templates/Jenkinsfile | 59 ++++++++++--------- pipelines/deploy/NodebbUI/Jenkinsfile | 58 ------------------ 4 files changed, 30 insertions(+), 123 deletions(-) delete mode 100644 ansible/nodebbui-upload.yml rename deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/{Upload_RC_Schema => UploadRCSchema}/config.xml (100%) delete mode 100644 pipelines/deploy/NodebbUI/Jenkinsfile diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml deleted file mode 100644 index 3c0bf414ae..0000000000 --- a/ansible/nodebbui-upload.yml +++ /dev/null @@ -1,36 +0,0 @@ -- hosts: local - become: yes - gather_facts: no - vars_files: - - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - nodebbui_storage: "{{ nodebbui_container_name }}" - tasks: - - name: delete files and folders from azure storage using azcopy - include_role: - name: azure-cloud-storage - tasks_from: delete-using-azcopy.yml - vars: - blob_container_name: "{{ nodebbui_storage }}" - blob_container_folder_path: "" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - when: cloud_service_provider == "azure" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_name: "{{ nodebbui_storage }}" - container_public_access: "container" - blob_container_folder_path: "" - local_file_or_folder_path: "{{ source_name }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml similarity index 100% rename from deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml rename to deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml diff --git a/pipelines/certs-templates/Jenkinsfile b/pipelines/certs-templates/Jenkinsfile index ea4617aa53..74fea3b5e6 100644 --- a/pipelines/certs-templates/Jenkinsfile +++ b/pipelines/certs-templates/Jenkinsfile @@ -10,39 +10,40 @@ node() { stage('checkout utils repo') { cleanWs() checkout scm - sh """ - git clone https://github.com/project-sunbird/sunbird-utils.git -b ${sunbird_util_branch_or_tag} cert-templates - """ + sh "git clone https://github.com/project-sunbird/sunbird-utils.git -b 
${sunbird_util_branch_or_tag} cert-templates" } - ansiColor('xterm') { - stage('inject vars') { - values = [:] - currentWs = sh(returnStdout: true, script: 'pwd').trim() - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - - ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - if (params.badgeType == "createBadge") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" - } else if (params.badgeType == "createIssuer") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" - }else if (params.badgeType == "createPublicKey") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" - }else { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" - } - + ansiColor('xterm') { + stage('inject vars') { + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" + if (params.badgeType == "createBadge") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" + } + else if (params.badgeType == "createIssuer") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" + } + else if (params.badgeType == "createPublicKey") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" + } + else { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" + 
} values.put('currentWs', currentWs) values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - } + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } + } } catch (err) { currentBuild.result = "FAILURE" diff --git a/pipelines/deploy/NodebbUI/Jenkinsfile b/pipelines/deploy/NodebbUI/Jenkinsfile deleted file mode 100644 index 150a57f442..0000000000 --- a/pipelines/deploy/NodebbUI/Jenkinsfile +++ /dev/null @@ -1,58 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - values = lp_dp_params() - values.put('module', 'Core') - stage('get artifact') { - currentWs = sh(returnStdout: true, script: 'pwd').trim() - artifact = values.artifact_name + ":" + values.artifact_version - values.put('currentWs', currentWs) - values.put('artifact', artifact) - artifact_download(values) - } - stage('deploy artifact'){ - sh """ - unzip -o ${artifact} -d discussion-ui - """ - - ansiblePlaybook = "${currentWs}/ansible/nodebbui-upload.yml" - ansibleExtraArgs = "--extra-vars \"source_name=${currentWs}/discussion-ui\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - - currentBuild.result = 'SUCCESS' - archiveArtifacts artifacts: "${artifact}", fingerprint: true, onlyIfSuccessful: true - archiveArtifacts artifacts: 'metadata.json', onlyIfSuccessful: true - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - summary() - } - catch (err) { - currentBuild.result = 'FAILURE' - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From 95de617943d898651349109f64fdf7741444fa2f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 14:59:33 +0530 Subject: [PATCH 019/203] fix: remove debug statements Signed-off-by: Keshav Prasad --- ansible/roles/cert-templates/tasks/main.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index dcbdeebadc..93619c9394 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -44,13 +44,6 @@ storage_account_key: "{{ azure_private_storage_account_key }}" when: cloud_service_provider == "azure" -- name: list all the files - 
shell: "ls -lR {{cert_location}}" - register: allfiles - -- debug: - var: allfiles - - name: Remove unwanted files file: path: "{{cert_location}}/cert-templates" From 4c20bc6d3837ae31b3e5c4b44fb9719688fac094 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 15:15:36 +0530 Subject: [PATCH 020/203] fix: formatting, don't remove files to make debug easy Signed-off-by: Keshav Prasad --- ansible/roles/cert-templates/tasks/main.yml | 7 +------ pipelines/certs-templates/Jenkinsfile | 8 ++++---- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 93619c9394..ee05f2adb3 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -42,9 +42,4 @@ local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" storage_account_name: "{{ azure_private_storage_account_name }}" storage_account_key: "{{ azure_private_storage_account_key }}" - when: cloud_service_provider == "azure" - -- name: Remove unwanted files - file: - path: "{{cert_location}}/cert-templates" - state: absent + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/pipelines/certs-templates/Jenkinsfile b/pipelines/certs-templates/Jenkinsfile index 74fea3b5e6..eebc455109 100644 --- a/pipelines/certs-templates/Jenkinsfile +++ b/pipelines/certs-templates/Jenkinsfile @@ -21,16 +21,16 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" if (params.badgeType == "createBadge") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" } else if (params.badgeType == "createIssuer") { - ansibleExtraArgs = " --extra-vars 
\"cert_location=$currentWs createIssuer=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" } else if (params.badgeType == "createPublicKey") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" } else { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" } values.put('currentWs', currentWs) values.put('env', envDir) From 16db27a09088927388f024c82266542dafa6a9ac Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 16:22:27 +0530 Subject: [PATCH 021/203] fix: added missing jobs, clean up unused files Signed-off-by: Keshav Prasad --- .../Core/jobs/OfflineInstaller/config.xml | 108 ++++++++ .../jobs/OfflineInstaller/config.xml | 235 ++++++++++++++++++ pipelines/offlineinstaller/Jenkinsfile.Deploy | 80 ------ 3 files changed, 343 insertions(+), 80 deletions(-) create mode 100644 deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml delete mode 100644 pipelines/offlineinstaller/Jenkinsfile.Deploy diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml new file mode 100644 index 0000000000..cbff43d2d0 --- /dev/null +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml @@ -0,0 +1,108 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + -1 + -1 + 5 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! 
The metadata.json will be copied from this job.</b></font> + Build/Core/OfflineInstaller + false + + + build_number + <font color=darkgreen size=2><b>OPTIONAL: Specify the build job number to upload / copy the artifact built in that job.</b></font> + lastSuccessfulBuild + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - Just keep a copy of the artifact in Jenkins.</b></font> + + + ArtifactRepo + JenkinsJob + + + + + + + 0 + 0 + + false + project + false + + + + + + + + Build/Core/OfflineInstaller + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${public_repo_branch} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/artifacts/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml new file mode 100644 index 0000000000..0544e9948d --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml @@ -0,0 +1,235 @@ + + + + false + + + + -1 + -1 + -1 + 5 + + + + + + ArtifactUpload/dev/Core/OfflineInstaller + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! 
The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/Core/OfflineInstaller + false + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + ET_FORMATTED_HTML + true + + + offline_installer_type + <font color=dimgray size=2><b>Choose the type of installer you wanted to build</b></font> + + + windows64bit + linux64bit + windows32bit + + + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Download the artifact from azure blob, JenkinsJob - Use the atrifact from Jenkins job.</b></font> + choice-parameter-1754928650096303 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + PT_SINGLE_SELECT + false + 1 + + + build_number + + choice-parameter-1754928651800681 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + artifact_source + ET_FORMATTED_HTML + true + + + artifact_version + + choice-parameter-1754928653885653 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + artifact_source + ET_FORMATTED_HTML + true + + + + + 0 + 0 + + false + project + false + + + + + + + + ArtifactUpload/dev/Core/OfflineInstaller + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 1 + false + + + + pipelines/offlineinstaller/Jenkinsfile + false + + + false + diff --git a/pipelines/offlineinstaller/Jenkinsfile.Deploy b/pipelines/offlineinstaller/Jenkinsfile.Deploy deleted file mode 100644 index 710cfebc48..0000000000 --- a/pipelines/offlineinstaller/Jenkinsfile.Deploy +++ /dev/null @@ -1,80 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = 
"\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - cleanWs() - checkout scm - } - ansiColor('xterm') { - values = lp_dp_params() - stage('get artifact') { - currentWs = sh(returnStdout: true, script: 'pwd').trim() - artifact = values.artifact_name + ":" + values.artifact_version - values.put('currentWs', currentWs) - values.put('artifact', artifact) - artifact_download(values) - } - stage('deploy artifact') { - sh """ - unzip ${artifact} - mkdir offline-installer-repo - tar -xvzf src.tar.gz -C offline-installer-repo/ - """ - - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${params.offline_installer_type}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = "SUCCESS" - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - archiveArtifacts artifacts: "${artifact}", fingerprint: true, onlyIfSuccessful: true - archiveArtifacts artifacts: 'metadata.json', onlyIfSuccessful: true - } - - try { - stage('Build Installer, create and upload it to azure') { - dir('offline-installer-repo') { - sh """ - bash -x build.sh - """ - } - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - ansibleExtraArgs = " --extra-vars \"offline_repo_location=$currentWs uploadInstaller=True offline_installer_type=${offline_installer_type}\"" - values.put('currentWs', currentWs) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - 
archiveArtifacts artifacts: '*.zip', onlyIfSuccessful: true - archiveArtifacts artifacts: 'latest.json', onlyIfSuccessful: true - } - } - catch (err) { - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - ansibleExtraArgs = " --extra-vars \"offline_repo_location=$currentWs removeOfflineInstallerFolder=True offline_installer_type=${offline_installer_type}\"" - values.put('currentWs', currentWs) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - } - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From ea59c10aae333b9bea41fc97d38595327b4831a5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 16:28:16 +0530 Subject: [PATCH 022/203] fix: adding default offline store value Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 -- ansible/roles/desktop-deploy/defaults/main.yml | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 1f7bc11430..deec3a4a2a 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -502,8 +502,6 @@ content_import_remove_props: '["downloadUrl","variants","previewUrl","streamingU sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars -#sunbird_offline_azure_storage_account: "" #added this var for adopter usecase -offline_installer_container_name: "" #added this var for adopter usecase cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # Search-service diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index ad3803dcd1..3010db2349 100644 --- 
a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,5 +1,6 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" +offline_installer_container_name: "{{env}}-offlineinstaller" # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name From 306923f43f422ab4f5716c35fb2301b808fbd7ec Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 17:01:49 +0530 Subject: [PATCH 023/203] fix: clean up unused roles Signed-off-by: Keshav Prasad --- .../roles/offline-installer/defaults/main.yml | 9 -- .../roles/offline-installer/tasks/main.yml | 50 --------- .../roles/offline-installer/tasks/remove.yml | 8 -- .../tasks/upload_to_storage.yml | 103 ------------------ .../templates/32-bit-prerequisite.sh.j2 | 7 -- .../offline-installer/templates/Dockerfile.j2 | 13 --- .../templates/artifacts.sh.j2 | 15 --- .../offline-installer/templates/build.sh.j2 | 18 --- .../offline-installer/templates/env.json.j2 | 10 -- .../offline-installer/templates/envfile.j2 | 3 - .../templates/metadata.sh.j2 | 8 -- .../templates/setupOfflineInstaller.sh.j2 | 25 ----- pipelines/offlineinstaller/Jenkinsfile | 2 +- 13 files changed, 1 insertion(+), 270 deletions(-) delete mode 100644 ansible/roles/offline-installer/defaults/main.yml delete mode 100644 ansible/roles/offline-installer/tasks/main.yml delete mode 100644 ansible/roles/offline-installer/tasks/remove.yml delete mode 100644 ansible/roles/offline-installer/tasks/upload_to_storage.yml delete mode 100644 ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/Dockerfile.j2 delete mode 100644 ansible/roles/offline-installer/templates/artifacts.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/build.sh.j2 delete mode 100644 
ansible/roles/offline-installer/templates/env.json.j2 delete mode 100644 ansible/roles/offline-installer/templates/envfile.j2 delete mode 100644 ansible/roles/offline-installer/templates/metadata.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 diff --git a/ansible/roles/offline-installer/defaults/main.yml b/ansible/roles/offline-installer/defaults/main.yml deleted file mode 100644 index ad3803dcd1..0000000000 --- a/ansible/roles/offline-installer/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/offline-installer/tasks/main.yml b/ansible/roles/offline-installer/tasks/main.yml deleted file mode 100644 index e110fd3116..0000000000 --- a/ansible/roles/offline-installer/tasks/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: get the date and time for the artifact - set_fact: - time: "{{ lookup('pipe', 'date +\"%Y-%b-%d-%H-%M-%S\"') }}-{{offline_installer_type}}" - when: uploadInstaller is not defined - -- name: copy the env.json file to the repo - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/src/{{item}}" - mode: '0755' - with_items: - - env.json - when: uploadInstaller is not defined - -- name: copy the installer script file and build script for building offline installer - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: '0755' - with_items: - - build.sh - - envfile - - 32-bit-prerequisite.sh - when: uploadInstaller is not defined - -- name: create a directory to store artifacts - file: - path: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{time}}" - state: directory - recurse: yes - when: uploadInstaller is not defined - -- name: copy the installer script file and build script for building offline installer - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: '0755' - with_items: - - setupOfflineInstaller.sh - when: uploadInstaller is not defined - -- name: upload to azure - include: upload_to_storage.yml - when: uploadInstaller is defined - -- name: Delete offline installer folder if any issue - include: remove.yml - when: removeOfflineInstallerFolder is defined - diff --git a/ansible/roles/offline-installer/tasks/remove.yml 
b/ansible/roles/offline-installer/tasks/remove.yml deleted file mode 100644 index da1512de90..0000000000 --- a/ansible/roles/offline-installer/tasks/remove.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: Delete offline installer repo - file: - path: "{{offline_repo_location}}/offline-installer-repo/" - state: absent - -- name: Notify build failure - fail: - msg: "Please check the build script, it had been failed" diff --git a/ansible/roles/offline-installer/tasks/upload_to_storage.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml deleted file mode 100644 index b8a68ba164..0000000000 --- a/ansible/roles/offline-installer/tasks/upload_to_storage.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -- name: Get the environment name for the artifact name - shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.name'" - register: env_name - -- name: Display the environment name of the installer - debug: - msg: "{{env_name.stdout}}" - -- name: Create a variable to inject environment name to upload to azure blob - set_fact: - environment_name: "{{ env_name.stdout }}" - -- name: Get the version from the package.json file - shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.version'" - register: version - -- name: Display the version number of the installer - debug: - msg: "{{version.stdout}}" - -- name: Create a variable to inject version in the template - set_fact: - installer_version: "{{ version.stdout }}" - -- name: get the directory name - shell: "ls {{offline_repo_location}}/offline-installer-repo/offline_artifacts/" - register: folderName - -- debug: - msg: "{{folderName.stdout}}" - -- name: set the folder name to copy the artifacts - set_fact: - time: "{{folderName.stdout}}" - -- name: copy the installer artifacts and metadata files to upload it to azure blob and generate latest.json file - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: 
'0755' - with_items: - - artifacts.sh - - metadata.sh - -- name: copy the artifacts and generate the metadata file - shell: "bash {{offline_repo_location}}/offline-installer-repo/{{item}}" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/" - with_items: - - artifacts.sh - - metadata.sh - -- name: this block consists of tasks related to azure storage - block: - - name: set common azure variables - set_fact: - blob_container_name: "{{ offline_installer_storage }}" - container_public_access: "blob" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "" - local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "/latest" - local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts/{{ folderName.stdout }}" - when: cloud_service_provider == "azure" - -- name: Create a zip of the folder to archieve the artifact - archive: - path: - - "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{folderName.stdout}}" - dest: "{{offline_repo_location}}/{{offline_installer_type}}.zip" - owner: jenkins - group: jenkins - format: zip - -- name: copy latest.json file to archieve it in jenkins - copy: - src: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{folderName.stdout}}/latest.json" - dest: "{{offline_repo_location}}/latest.json" - owner: jenkins - group: jenkins - remote_src: yes - -- name: change the ownership of the directory to jenkins user - file: - path: "{{offline_repo_location}}" - state: directory - recurse: yes - owner: jenkins - 
group: jenkins \ No newline at end of file diff --git a/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 b/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 deleted file mode 100644 index cbfa755b0e..0000000000 --- a/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -#Build the offline installer -cd /private/src/ -rm -rf node_modules -npm install leveldown --verbose -npm run dist diff --git a/ansible/roles/offline-installer/templates/Dockerfile.j2 b/ansible/roles/offline-installer/templates/Dockerfile.j2 deleted file mode 100644 index 348c4c6e0a..0000000000 --- a/ansible/roles/offline-installer/templates/Dockerfile.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#FROM electronuserland/builder:wine -#MAINTAINER "S M Y ALTAMASH" "" -#ENV ELECTRON_CACHE="/root/.cache/electron" -#ENV ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" -#ENV GITHUB_ACCESS_TOKEN={{offline_git_access_token}} -#ENV GITHUB_PRIVATE_REPO={{offline_git_private_repo}} -#ENV TARGET_ENVIRONMENT={{offline_target_env}} -#WORKDIR /private/ -#ADD . 
/private/ -#WORKDIR /private/src/ -#CMD npm install && npm run dist -#CMD npm run dist-win64 -#CMD npm run dist-linux diff --git a/ansible/roles/offline-installer/templates/artifacts.sh.j2 b/ansible/roles/offline-installer/templates/artifacts.sh.j2 deleted file mode 100644 index ea5db269de..0000000000 --- a/ansible/roles/offline-installer/templates/artifacts.sh.j2 +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/win/ia32/{{environment_name}} Setup {{installer_version}}.exe' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_windows32bit.exe -elif [ "{{offline_installer_type}}" == "windows64bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/win/x64/{{environment_name}} Setup {{installer_version}}.exe' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_windows64bit.exe -elif [ "{{offline_installer_type}}" == "linux64bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/linux/x64/{{environment_name}}_{{installer_version}}_amd64.deb' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_linux64bit.deb -fi - - diff --git a/ansible/roles/offline-installer/templates/build.sh.j2 b/ansible/roles/offline-installer/templates/build.sh.j2 deleted file mode 100644 index 720b64b8c6..0000000000 --- a/ansible/roles/offline-installer/templates/build.sh.j2 +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "Offline Installer for Sunbird" - -# Build script -set -eo pipefail - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then - - docker run --rm -v ${PWD}:/private/ i386/node:8.16.2-stretch bash -x /private/32-bit-prerequisite.sh - -fi - -#chmod +x setupOfflineInstaller.sh -docker run --rm --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env 
ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" -v ${PWD}:/project electronuserland/builder:wine bash -x setupOfflineInstaller.sh - -echo "Build the installer succesfully" diff --git a/ansible/roles/offline-installer/templates/env.json.j2 b/ansible/roles/offline-installer/templates/env.json.j2 deleted file mode 100644 index 8705f96ab9..0000000000 --- a/ansible/roles/offline-installer/templates/env.json.j2 +++ /dev/null @@ -1,10 +0,0 @@ -{ - "APP_BASE_URL": "https://sunbird.org", - "CHANNEL": "sunbird", - "TELEMETRY_SYNC_INTERVAL_IN_SECS": 30, - "APP_ID": "local.sunbird.desktop", - "TELEMETRY_PACKET_SIZE": 200, - "APP_BASE_URL_TOKEN": "{{offline_app_base_url_token}}", - "APP_NAME": "SUNBIRD", - "MODE": "standalone" -} diff --git a/ansible/roles/offline-installer/templates/envfile.j2 b/ansible/roles/offline-installer/templates/envfile.j2 deleted file mode 100644 index 9b98165e0b..0000000000 --- a/ansible/roles/offline-installer/templates/envfile.j2 +++ /dev/null @@ -1,3 +0,0 @@ -GITHUB_ACCESS_TOKEN={{offline_git_access_token}} -GITHUB_PRIVATE_REPO={{offline_git_private_repo}} -TARGET_ENVIRONMENT={{offline_target_env}} diff --git a/ansible/roles/offline-installer/templates/metadata.sh.j2 b/ansible/roles/offline-installer/templates/metadata.sh.j2 deleted file mode 100644 index 9d7e7a0dd7..0000000000 --- a/ansible/roles/offline-installer/templates/metadata.sh.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -env_name={{environment_name}} -version={{installer_version}} -artifactFolder=$(find offline_artifacts/* -type d) - -# constructing the latest.json file -echo "{\"version\":\"${version}\",\"windows\":{\"32bit\":\"${env_name}_${version}_windows32bit.exe\",\"64bit\":\"${env_name}_${version}_windows64bit.exe\"},\"linux\":{\"64bit\":\"${env_name}_${version}_linux64bit.deb\"}}" | jq '.' 
| tee -a {{offline_repo_location}}/offline-installer-repo/${artifactFolder}/latest.json diff --git a/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 b/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 deleted file mode 100644 index 64b5a019b8..0000000000 --- a/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Downgrade the node version -npm install -g n -n 8.16 -cd /project/src - -if [ "{{offline_installer_type}}" != "windows32bit" ]; -then -#Build the offline installer - npm install - npm run dist -fi - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then -# npm run dist - npm run dist-win32 -elif [ "{{offline_installer_type}}" == "windows64bit" ]; -then - npm run dist-win64 -elif [ "{{offline_installer_type}}" == "linux64bit" ]; -then - npm run dist-linux -fi diff --git a/pipelines/offlineinstaller/Jenkinsfile b/pipelines/offlineinstaller/Jenkinsfile index a4e6a8f610..c97c01a9bd 100644 --- a/pipelines/offlineinstaller/Jenkinsfile +++ b/pipelines/offlineinstaller/Jenkinsfile @@ -31,7 +31,7 @@ node() { } stage('Install the offline desktop Application') { ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\" -v" + ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\"" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 6ffe54280dc385838ade36df168fa6b83d008afd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 11:57:28 +0530 Subject: [PATCH 024/203] fix: removed unused files, typos Signed-off-by: Keshav Prasad --- .../roles/deploy-player/tasks/main.yml | 2 +- .../ansible/roles/helm-deploy/tasks/main.yml | 10 ++-- 
.../roles/sunbird-deploy/tasks/main.yml | 2 +- .../backup/jenkins-backup-upload/Jenkinsfile | 50 ------------------- 4 files changed, 7 insertions(+), 57 deletions(-) delete mode 100644 pipelines/backup/jenkins-backup-upload/Jenkinsfile diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index 5abdc85449..52500df2e3 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -48,7 +48,7 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index bd40bcfb82..a57c847b97 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -38,13 +38,13 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_prefix }}" content: "{{ core_vault_sunbird_sso_publickey }}" when: release_name == "adminutils" -- name: Create the token pubic key file for ML Services +- name: Create the token public key file for ML Services copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" @@ -107,7 +107,7 @@ args: executable: /bin/bash register: deployment_result - ignore_errors: yes + ignore_errors: true - name: Get deployed image name - deployments shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' @@ -121,7 +121,7 @@ args: executable: /bin/bash register: daemonset_result - 
ignore_errors: yes + ignore_errors: true - name: Get deployed image name - daemonsets shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" @@ -135,7 +135,7 @@ args: executable: /bin/bash register: statefulset_result - ignore_errors: yes + ignore_errors: true - name: Get deployed image name - statefulsets shell: "kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 33fba6fb42..09e96cf25e 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -39,7 +39,7 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" diff --git a/pipelines/backup/jenkins-backup-upload/Jenkinsfile b/pipelines/backup/jenkins-backup-upload/Jenkinsfile deleted file mode 100644 index cd880c9390..0000000000 --- a/pipelines/backup/jenkins-backup-upload/Jenkinsfile +++ /dev/null @@ -1,50 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - stage('deploy'){ - values = [:] - currentWs = sh(returnStdout: true, script: 'pwd').trim() - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - ansiblePlaybook = "${currentWs}/ansible/jenkins-backup.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From e635d07e7640e3aabccb8e06a85dadb0d593634f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 12:05:16 +0530 Subject: [PATCH 025/203] fix: remove verbosity Signed-off-by: Keshav Prasad --- pipelines/backup/jenkins-backup/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/backup/jenkins-backup/Jenkinsfile b/pipelines/backup/jenkins-backup/Jenkinsfile index 27570e2a87..787a44fe17 100644 --- a/pipelines/backup/jenkins-backup/Jenkinsfile +++ b/pipelines/backup/jenkins-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/jenkins-backup.yml" - ansibleExtraArgs = "-v --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = 
"--vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 4d1b0a5222f3b7dd4203676415363ad6ca495313 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 14:31:57 +0530 Subject: [PATCH 026/203] fix: adding default container name Signed-off-by: Keshav Prasad --- ansible/mongodb-backup.yml | 3 ++- ansible/roles/mongodb-backup/defaults/main.yml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible/mongodb-backup.yml b/ansible/mongodb-backup.yml index 2ab4091fc4..4db8d263bd 100644 --- a/ansible/mongodb-backup.yml +++ b/ansible/mongodb-backup.yml @@ -1,5 +1,6 @@ +--- - hosts: "{{ host }}" - become: yes + become: true vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index d7b56ebefd..da5a0f710f 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,5 +1,5 @@ mongo_backup_dir: '/tmp/mongo-backup' -mongo_backup_azure_container_name: "{{ mongo_backup_azure_container_name }}" +mongo_backup_azure_container_name: "mongodb-backup" # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. 
In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name From 19b876d2c6c217bd3273425222be8e77551b6be5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 15:25:46 +0530 Subject: [PATCH 027/203] feat: adding new mongo backup jobs Signed-off-by: Keshav Prasad --- .../Core/jobs/GraylogMongoDbBackup/config.xml | 130 ++++++++++++++++++ .../jobs/Core/jobs/MongoDbBackup/config.xml | 130 ++++++++++++++++++ 2 files changed, 260 insertions(+) create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml new file mode 100644 index 0000000000..f1e05c88a9 --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml @@ -0,0 +1,130 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + private_branch + + choice-parameter-189743214208409 + 1 + + true + + + + true + + + GraylogMongoDbBackup + OpsAdministration/dev/Core/GraylogMongoDbBackup + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-189743216959018 + 1 + + true + + + + true + + + GraylogMongoDbBackup + OpsAdministration/dev/Core/GraylogMongoDbBackup + + + ET_FORMATTED_HTML + true + + + host + + graylog + false + + + + + 0 + 0 + + false + project + false + + + + + + + 00 4 * * * + + + + + + + 2 + + + https://github.com/keshavprasadms/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/backup/mongodb-backup/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml 
b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml new file mode 100644 index 0000000000..ff3d4bd8f3 --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml @@ -0,0 +1,130 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + private_branch + + choice-parameter-189743214208409 + 1 + + true + + + + true + + + MongoDbBackup + OpsAdministration/dev/Core/MongoDbBackup + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-189743216959018 + 1 + + true + + + + true + + + MongoDbBackup + OpsAdministration/dev/Core/MongoDbBackup + + + ET_FORMATTED_HTML + true + + + host + + mongo_master + false + + + + + 0 + 0 + + false + project + false + + + + + + + 30 3 * * * + + + + + + + 2 + + + https://github.com/keshavprasadms/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/backup/mongodb-backup/Jenkinsfile + false + + + false + From 332c7d70b3b37991aebcb75c0a021076780fb95c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 15:34:49 +0530 Subject: [PATCH 028/203] fix: delegate container creation to local Signed-off-by: Keshav Prasad --- ansible/roles/es-azure-snapshot/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index e804b4344d..8ce0fcd267 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -8,6 +8,8 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + apply: + delegate_to: localhost vars: blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" From 765c15c25bf08de64b2ca46450f3f6507bc5851c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 29 Sep 2022 06:41:34 +0530 Subject: [PATCH 029/203] fix: updated pip package name Signed-off-by: Keshav Prasad 
--- ansible/bootstrap.yml | 2 +- pipelines/backup/es-backup/Jenkinsfile | 2 +- private_repo/ansible/inventory/dev/Core/common.yml | 3 --- private_repo/ansible/inventory/dev/Core/secrets.yml | 3 --- 4 files changed, 2 insertions(+), 8 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index aba26fbbd4..d8bf9fa494 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -8,7 +8,7 @@ - name: Installing other packages apt: state: present - name: ['python-pkg-resources', 'python2-pip'] + name: ['python-pkg-resources', 'python-pip'] when: ansible_distribution_version | float < 18 - name: Installing other packages apt: diff --git a/pipelines/backup/es-backup/Jenkinsfile b/pipelines/backup/es-backup/Jenkinsfile index 81d46a2a2b..a941380cb9 100644 --- a/pipelines/backup/es-backup/Jenkinsfile +++ b/pipelines/backup/es-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/es.yml" - ansibleExtraArgs = "--tags \"es_backup\" -v --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags \"es_backup\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 8277399b44..bd034f9bb3 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -22,7 +22,6 @@ alerts_mailing_list : "devops@myorg.com" # Comma separat # Define the below if you are using Azure Cloud -# Management Storage Account # Note - You can use the same azure account for the below variables or have separate azure accounts sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for 
storing public data (like contents) sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) @@ -35,11 +34,9 @@ azure_management_storage_account_name: "{{ sunbird_management_storage_account_na azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud -# Management Storage Bucket aws_management_bucket_name: "" # Define the below if you are using Google Cloud -# Management Storage Bucket gcs_management_bucket_name: "" # ------------------------------------------------------------------------------------------------------------ # diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index f157d85862..cf76c3d66e 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -22,7 +22,6 @@ sunbird_management_storage_account_key: "change.azure.storage.account.key" sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" # Define the below if you are using Azure Cloud -# Management Storage Account azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" @@ -31,12 +30,10 @@ azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud -# Management Storage Bucket aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" # Define the below if you are using Google Cloud -# Management Storage Bucket gcs_management_bucket_service_account: | From 769de28fb936a02ab4f881674fa521ec28a13d9d Mon Sep 17 00:00:00 2001 
From: Keshav Prasad Date: Thu, 29 Sep 2022 06:50:28 +0530 Subject: [PATCH 030/203] fix: remove unnessary all tags Signed-off-by: Keshav Prasad --- ansible/bootstrap.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index d8bf9fa494..30d57ca52f 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -19,9 +19,8 @@ - bootstrap_any tags: - bootstrap_any - - all -- hosts: "{{hosts}}" +- hosts: "{{ hosts }}" become: yes ignore_unreachable: yes vars_files: @@ -40,5 +39,4 @@ roles: - vm-agents-nodeexporter tags: - - node_exporter - - all + - node_exporter \ No newline at end of file From beeabe072c2705b52a580c23ce87153113d41642 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 29 Sep 2022 14:03:28 +0530 Subject: [PATCH 031/203] fix: renamed to cassandra backup folder (#3560) Signed-off-by: Keshav Prasad --- ansible/roles/cassandra-backup/tasks/main.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index ac0682c58a..bbc7246c48 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -14,11 +14,11 @@ mode: 0755 - set_fact: - cassandra_backup_gzip_file_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" + cassandra_backup_folder_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" - name: run the backup script become: true - shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_gzip_file_name }}" --snapshotdirectory "{{ cassandra_backup_gzip_file_name }}" "{{additional_arguments|d('')}}" + shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_folder_name }}" --snapshotdirectory "{{ cassandra_backup_folder_name }}" "{{additional_arguments|d('')}}" args: chdir: /data/cassandra/backup async: 14400 @@ -39,7 +39,8 
@@ vars: blob_container_name: "{{ cassandra_backup_storage }}" container_public_access: "off" - local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" + blob_container_folder_path: "" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" From 29dc554718d918fc76027b6427c32bce9b6d99db Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Mon, 17 Oct 2022 16:44:45 +0530 Subject: [PATCH 032/203] Updated post-install script (#3564) --- .../tasks/knowledge_platform_tasks.yaml | 12 +++---- ansible/roles/post-install/tasks/main.yml | 6 ++-- .../roles/post-install/tasks/user_org.yaml | 32 +------------------ kubernetes/pipelines/post-install/Jenkinsfile | 2 +- 4 files changed, 11 insertions(+), 41 deletions(-) diff --git a/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml b/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml index f7a788d417..d88878755d 100644 --- a/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml +++ b/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml @@ -11,7 +11,7 @@ } } with_items: "{{ master_category }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create Object category uri: @@ -73,7 +73,7 @@ } } } - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create framework category uri: @@ -89,7 +89,7 @@ } } with_items: "{{ framework_category }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create framework terms uri: @@ -105,7 +105,7 @@ } } with_items: "{{ framework_terms }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Publish Framework uri: @@ -116,7 +116,7 @@ 
X-Channel-Id: "{{ sunbird_custodian_org_id }}" body: |- {} - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Assosiating framework with channel uri: @@ -153,7 +153,7 @@ "fields": ["name","identifier","code","description"] } } - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" register: created_framework - name: Printing the output debug: diff --git a/ansible/roles/post-install/tasks/main.yml b/ansible/roles/post-install/tasks/main.yml index 121e4d0653..84513e9d84 100644 --- a/ansible/roles/post-install/tasks/main.yml +++ b/ansible/roles/post-install/tasks/main.yml @@ -65,6 +65,6 @@ } } -# - import_tasks: knowledge_platform_tasks.yaml -# - import_tasks: user_org.yaml -# - import_tasks: forms.yaml +- import_tasks: knowledge_platform_tasks.yaml +- import_tasks: user_org.yaml +- import_tasks: forms.yaml diff --git a/ansible/roles/post-install/tasks/user_org.yaml b/ansible/roles/post-install/tasks/user_org.yaml index caee5ebaa7..e06f28ad2b 100644 --- a/ansible/roles/post-install/tasks/user_org.yaml +++ b/ansible/roles/post-install/tasks/user_org.yaml @@ -72,11 +72,6 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "OFFICIAL_TEXTBOOK_BADGE_ISSUER", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "PUBLIC", "operation":"add", @@ -87,11 +82,6 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "TEACHER_BADGE_ISSUER", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "CONTENT_CREATOR", "operation":"add", @@ -103,12 +93,7 @@ "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, { - "role": "SYSTEM_ADMINISTRATION", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, - { - "role": "ANNOUNCEMENT_SENDER", + "role": "SYSTEM_ADMINISTRATION", 
"operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, @@ -127,26 +112,11 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "CONTENT_REVIEW", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, - { - "role": "CONTENT_CREATION", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "MEMBERSHIP_MANAGEMENT", "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "COURSE_CREATOR", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "BOOK_CREATOR", "operation":"add", diff --git a/kubernetes/pipelines/post-install/Jenkinsfile b/kubernetes/pipelines/post-install/Jenkinsfile index deda3ff045..bab10fb0fa 100644 --- a/kubernetes/pipelines/post-install/Jenkinsfile +++ b/kubernetes/pipelines/post-install/Jenkinsfile @@ -24,7 +24,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim().toLowerCase() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "$currentWs/ansible/post-install.yaml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag} -v" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -v" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From a1ab95bf3c2b49c74523a86e92c827ec8581ca87 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 17 Oct 2022 19:24:22 +0530 Subject: [PATCH 033/203] certificates api added --- ansible/roles/kong-api/defaults/main.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index 907e250460..edd37b3470 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ 
b/ansible/roles/kong-api/defaults/main.yml @@ -9748,3 +9748,23 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true +- name: getDetailsOfProjectsWithCertificate + uris: "{{ userProjects_service_prefix }}/mlprojects/v1/certificates" + upstream_url: "{{ ml_project_service_url }}/v1/userProjects/certificates" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - projectAccess + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: false + config.enabled: false From e37ca7291abf51ec385d9c464a3852f32b5724f1 Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 19 Oct 2022 14:07:45 +0530 Subject: [PATCH 034/203] Issue #KN-9 feat: Content Publish API refactor. --- .../stack-sunbird/templates/content-service_application.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index d33dbecf6f..b45d85dee1 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -493,6 +493,7 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" + publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config From cde575b2de6bfa1e30fdc04fa999cdeae37c4bbb Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 20 Oct 2022 16:49:51 +0530 Subject: [PATCH 035/203] Updated the ingestion specs --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml 
b/ansible/roles/ml-analytics-service/defaults/main.yml index 915992714e..abd8aae3b6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -52,10 +52,10 @@ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_interna ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": 
"azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' ml_analytics_azure_sas_token: "{{ sunbird_private_storage_account_key }}" -ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_obs_distinctCnt_azure_blob_path: "observation/distinctCount/" ml_analytics_obs_distinctCnt_domain_azure_blob_path: "observation/distinctCount_domain/" ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path: "observation/distinctCount_domain_criteria/" From f2dacabae324e635d74e42ca451f9dcf080b8add Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 11:57:37 +0530 Subject: [PATCH 036/203] project certificate schema jsons added --- .../registry/schemas/ProjectCertificate.json | 75 ++++++ .../final_project_credential_template.json | 
28 +++ .../schema/final_project_sunbird_context.json | 91 +++++++ .../schema/final_project_v1_context.json | 237 ++++++++++++++++++ 4 files changed, 431 insertions(+) create mode 100644 kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json create mode 100644 utils/sunbird-RC/schema/final_project_credential_template.json create mode 100644 utils/sunbird-RC/schema/final_project_sunbird_context.json create mode 100644 utils/sunbird-RC/schema/final_project_v1_context.json diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json new file mode 100644 index 0000000000..c8555839d8 --- /dev/null +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -0,0 +1,75 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "ProjectCertificate": { + "$ref": "#/definitions/ProjectCertificate" + } + }, + "required": [ + "ProjectCertificate" + ], + "title": "ProjectCertificate", + "definitions": { + "ProjectCertificate": { + "$id": "#/properties/ProjectCertificate", + "type": "object", + "title": "The ProjectCertificate Schema", + "required": [ + "recipient" + ], + "properties": { + "status": { + "type": "string", + "enum": ["ACTIVE", "REVOKED", "DELETED"] + }, + "recipient":{ + "$id": "#/properties/recipient", + "$ref": "Recipient.json#/definitions/Recipient" + }, + "templateUrl": { + "type": "string" + }, + "issuer":{ + "$id": "#/properties/issuer", + "$ref": "Issuer.json#/definitions/Issuer" + }, + "projectName":{ + "type": "string" + }, + "projectId":{ + "type": "string" + }, + "solutionId":{ + "type": "string" + }, + "solutionName":{ + "type": "string" + }, + "programId":{ + "type": "string" + }, + "programName":{ + "type": "string" + }, + "completedDate": { + "type": "string" + } + + } + } + }, + "_osConfig": { + "uniqueIndexFields": [ + ], + "ownershipAttributes": [], + "roles": 
[ + ], + "inviteRoles": [ + "anonymous" + ], + "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], + "enableLogin": false, + "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_credential_template.json" + } +} \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_credential_template.json b/utils/sunbird-RC/schema/final_project_credential_template.json new file mode 100644 index 0000000000..2ff3508e79 --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_credential_template.json @@ -0,0 +1,28 @@ +{ + "@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_sunbird_context.json" + ], + "type": [ + "VerifiableCredential" + ], + "id":"did:sunbird:{{osid}}", + "issuanceDate": "{{osCreatedAt}}", + "credentialSubject": { + "type":"{{certificateLabel}}", + "recipientName": "{{recipient.name}}", + "projectName": "{{projectName}}", + "projectId": "{{projectId}}", + "solutionId": "{{solutionId}}", + "solutionName": "{{solutionName}}" + }, + "issuer":{ + "id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#Issuer", + "type":[ + "Issuer" + ], + "name":"{{issuer.name}}", + "publicKey":["{{issuer.kid}}"] + } + } \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_sunbird_context.json b/utils/sunbird-RC/schema/final_project_sunbird_context.json new file mode 100644 index 0000000000..bbc31a4010 --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_sunbird_context.json @@ -0,0 +1,91 @@ +{ + "@context": { + 
"@version": 1.1, + "@protected": true, + "ProjectCertificate": { + "@id": "https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#ProjectCertificate", + "@context": { + "id": "@id", + "@version": 1.1, + "@protected": true, + "ProjectCertificate": "schema:Text" + } + }, + "projectName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#projectName", + "@context": { + "name":"schema:Text" + } + }, + "projectId":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#projectId", + "@context": { + "name":"schema:Text" + } + }, + "solutionName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#solutionName", + "@context": { + "name":"schema:Text" + } + }, + "solutionId":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#solutionId", + "@context": { + "name":"schema:Text" + } + }, + "recipientName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#recipientName", + "@context": { + "name":"schema:Text" + } + }, + "name":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#name", + "@context": { + "name":"schema:Text" + } + }, + "publicKey":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#publicKey", + "@context": { + "name":"schema:Text" + } + }, + "url":{ + 
"@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#url", + "@context": { + "name":"schema:Text" + } + }, + "designation":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#designation", + "@context": { + "name":"schema:Text" + } + }, + "image":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#image", + "@context": { + "name":"schema:Text" + } + }, + "identity":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#identity", + "@context": { + "name":"schema:Text" + } + }, + "signatory": { + "@id":"https://github.com/sunbird-specs/vc-specs#signatory", + "@container": "@list" + }, + "templateUrl": { + "@id": "https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#templateUrl", + "@context": { + "name": "schema:Text" + } + } + } +} \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_v1_context.json b/utils/sunbird-RC/schema/final_project_v1_context.json new file mode 100644 index 0000000000..d028ec2a3f --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_v1_context.json @@ -0,0 +1,237 @@ +{ + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "VerifiableCredential": { + "@id": "https://www.w3.org/2018/credentials#VerifiableCredential", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "credentialSchema": { + "@id": "cred:credentialSchema", + 
"@type": "@id", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + + "JsonSchemaValidator2018": "cred:JsonSchemaValidator2018" + } + }, + "credentialStatus": {"@id": "cred:credentialStatus", "@type": "@id"}, + "credentialSubject": {"@id": "cred:credentialSubject", "@type": "@id"}, + "evidence": {"@id": "cred:evidence", "@type": "@id"}, + "expirationDate": {"@id": "cred:expirationDate", "@type": "xsd:dateTime"}, + "holder": {"@id": "cred:holder", "@type": "@id"}, + "issued": {"@id": "cred:issued", "@type": "xsd:dateTime"}, + "issuer": {"@id": "cred:issuer", "@type": "@id"}, + "issuanceDate": {"@id": "cred:issuanceDate", "@type": "xsd:dateTime"}, + "proof": {"@id": "sec:proof", "@type": "@id", "@container": "@graph"}, + "refreshService": { + "@id": "cred:refreshService", + "@type": "@id", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + + "ManualRefreshService2018": "cred:ManualRefreshService2018" + } + }, + "termsOfUse": {"@id": "cred:termsOfUse", "@type": "@id"}, + "validFrom": {"@id": "cred:validFrom", "@type": "xsd:dateTime"}, + "validUntil": {"@id": "cred:validUntil", "@type": "xsd:dateTime"} + } + }, + + "VerifiablePresentation": { + "@id": "https://www.w3.org/2018/credentials#VerifiablePresentation", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + "sec": "https://w3id.org/security#", + + "holder": {"@id": "cred:holder", "@type": "@id"}, + "proof": {"@id": "sec:proof", "@type": "@id", "@container": "@graph"}, + "verifiableCredential": {"@id": "cred:verifiableCredential", "@type": "@id", "@container": "@graph"} + } + }, + + "EcdsaSecp256k1Signature2019": { + "@id": "https://w3id.org/security#EcdsaSecp256k1Signature2019", + "@context": { + "@version": 1.1, + "@protected": 
true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "EcdsaSecp256r1Signature2019": { + "@id": "https://w3id.org/security#EcdsaSecp256r1Signature2019", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "Ed25519Signature2018": { + "@id": 
"https://w3id.org/security#Ed25519Signature2018", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "RsaSignature2018": { + "@id": "https://w3id.org/security#RsaSignature2018", + "@context": { + "@version": 1.1, + "@protected": true, + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "proof": {"@id": "https://w3id.org/security#proof", "@type": "@id", "@container": 
"@graph"} + } + } \ No newline at end of file From d32a99d8194a51e78f7a0c59a161cd52282528b9 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 17:29:21 +0530 Subject: [PATCH 037/203] certicificate schema filename changes and project certificate api added --- ansible/roles/kong-api/defaults/main.yml | 25 ++++++++++- .../registry/schemas/ProjectCertificate.json | 44 +++++++++---------- ....json => project_credential_template.json} | 4 +- ...text.json => project_sunbird_context.json} | 0 ...1_context.json => project_v1_context.json} | 0 5 files changed, 48 insertions(+), 25 deletions(-) rename utils/sunbird-RC/schema/{final_project_credential_template.json => project_credential_template.json} (82%) rename utils/sunbird-RC/schema/{final_project_sunbird_context.json => project_sunbird_context.json} (100%) rename utils/sunbird-RC/schema/{final_project_v1_context.json => project_v1_context.json} (100%) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index edd37b3470..2673650119 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9748,7 +9748,8 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true -- name: getDetailsOfProjectsWithCertificate + +- name: projectCertificateList uris: "{{ userProjects_service_prefix }}/mlprojects/v1/certificates" upstream_url: "{{ ml_project_service_url }}/v1/userProjects/certificates" strip_uri: true @@ -9768,3 +9769,25 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false +- name: createRCProjectCertificate + uris: "{{ registry_service_prefix }}/certificate/v1/create" + upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - certificateCreate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + 
config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: request-transformer + config.remove.headers: Authorization + - name: opa-checks + config.required: true + config.enabled: true diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index c8555839d8..31257ac8b1 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -34,27 +34,27 @@ "$id": "#/properties/issuer", "$ref": "Issuer.json#/definitions/Issuer" }, - "projectName":{ - "type": "string" - }, - "projectId":{ - "type": "string" - }, - "solutionId":{ - "type": "string" - }, - "solutionName":{ - "type": "string" - }, - "programId":{ - "type": "string" - }, - "programName":{ - "type": "string" - }, - "completedDate": { - "type": "string" - } + "projectName":{ + "type": "string" + }, + "projectId":{ + "type": "string" + }, + "solutionId":{ + "type": "string" + }, + "solutionName":{ + "type": "string" + }, + "programId":{ + "type": "string" + }, + "programName":{ + "type": "string" + }, + "completedDate": { + "type": "string" + } } } @@ -70,6 +70,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_credential_template.json" + "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_credential_template.json" } } \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json similarity index 82% rename from 
utils/sunbird-RC/schema/final_project_credential_template.json rename to utils/sunbird-RC/schema/project_credential_template.json index 2ff3508e79..230fdccce9 100644 --- a/utils/sunbird-RC/schema/final_project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,8 +1,8 @@ { "@context": [ "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_sunbird_context.json" + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" ], "type": [ "VerifiableCredential" diff --git a/utils/sunbird-RC/schema/final_project_sunbird_context.json b/utils/sunbird-RC/schema/project_sunbird_context.json similarity index 100% rename from utils/sunbird-RC/schema/final_project_sunbird_context.json rename to utils/sunbird-RC/schema/project_sunbird_context.json diff --git a/utils/sunbird-RC/schema/final_project_v1_context.json b/utils/sunbird-RC/schema/project_v1_context.json similarity index 100% rename from utils/sunbird-RC/schema/final_project_v1_context.json rename to utils/sunbird-RC/schema/project_v1_context.json From 179885117785d858a1071ca54718bf659ce1e3f1 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 17:39:48 +0530 Subject: [PATCH 038/203] format changes --- ansible/roles/kong-api/defaults/main.yml | 1 + .../registry/schemas/ProjectCertificate.json | 54 +++++++++---------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml 
b/ansible/roles/kong-api/defaults/main.yml index 2673650119..f9ae07e61c 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9769,6 +9769,7 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false + - name: createRCProjectCertificate uris: "{{ registry_service_prefix }}/certificate/v1/create" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 31257ac8b1..98f3bb91e9 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -11,29 +11,29 @@ ], "title": "ProjectCertificate", "definitions": { - "ProjectCertificate": { - "$id": "#/properties/ProjectCertificate", - "type": "object", - "title": "The ProjectCertificate Schema", - "required": [ - "recipient" - ], - "properties": { - "status": { - "type": "string", - "enum": ["ACTIVE", "REVOKED", "DELETED"] - }, - "recipient":{ - "$id": "#/properties/recipient", - "$ref": "Recipient.json#/definitions/Recipient" - }, - "templateUrl": { - "type": "string" - }, - "issuer":{ - "$id": "#/properties/issuer", - "$ref": "Issuer.json#/definitions/Issuer" - }, + "ProjectCertificate": { + "$id": "#/properties/ProjectCertificate", + "type": "object", + "title": "The ProjectCertificate Schema", + "required": [ + "recipient" + ], + "properties": { + "status": { + "type": "string", + "enum": ["ACTIVE", "REVOKED", "DELETED"] + }, + "recipient":{ + "$id": "#/properties/recipient", + "$ref": "Recipient.json#/definitions/Recipient" + }, + "templateUrl": { + "type": "string" + }, + "issuer":{ + "$id": "#/properties/issuer", + "$ref": "Issuer.json#/definitions/Issuer" + }, "projectName":{ "type": "string" }, @@ -55,10 +55,10 @@ "completedDate": { "type": "string" } - - } - } - 
}, + + } + } + }, "_osConfig": { "uniqueIndexFields": [ ], From 2b7747c2e0636f53ff529cf97de0369c80d756f7 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Fri, 28 Oct 2022 18:56:55 +0530 Subject: [PATCH 039/203] project certificate download new api added --- ansible/roles/kong-api/defaults/main.yml | 25 +++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index f9ae07e61c..ff4323914e 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9769,7 +9769,7 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false - + - name: createRCProjectCertificate uris: "{{ registry_service_prefix }}/certificate/v1/create" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" @@ -9792,3 +9792,26 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true + +- name: getProjectRCCertificate + uris: "{{ registry_service_prefix }}/projetCertificate/v1/download" + upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - anonymousCertificateAccess + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: request-transformer + config.remove.headers: Authorization + - name: opa-checks + config.required: true + config.enabled: true From 97c0afc24d3fc9810364545022352b5bcae8eb34 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sun, 30 Oct 2022 09:03:04 +0530 Subject: [PATCH 040/203] Add gcloud role and related tasks (#3566) --- ansible/roles/cassandra-backup/tasks/main.yml | 12 +++++ 
ansible/roles/gcloud-cli/tasks/main.yml | 19 +++++++ .../roles/gcp-cloud-storage/defaults/main.yml | 49 +++++++++++++++++++ .../gcp-cloud-storage/tasks/delete-batch.yml | 11 +++++ .../gcp-cloud-storage/tasks/download.yml | 11 +++++ .../gcp-cloud-storage/tasks/gcloud-auth.yml | 14 ++++++ .../gcp-cloud-storage/tasks/gcloud-revoke.yml | 8 +++ .../roles/gcp-cloud-storage/tasks/main.yml | 20 ++++++++ .../gcp-cloud-storage/tasks/upload-batch.yml | 11 +++++ .../roles/gcp-cloud-storage/tasks/upload.yml | 11 +++++ .../roles/postgresql-backup/defaults/main.yml | 2 +- .../roles/postgresql-backup/tasks/main.yml | 11 +++++ .../roles/postgresql-restore/tasks/main.yml | 20 ++++---- .../ansible/inventory/dev/Core/common.yml | 7 ++- .../ansible/inventory/dev/Core/secrets.yml | 3 +- 15 files changed, 197 insertions(+), 12 deletions(-) create mode 100644 ansible/roles/gcloud-cli/tasks/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/download.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload.yml diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index bbc7246c48..fc662bcea5 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -44,6 +44,18 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: 
upload-batch.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ cassandra_backup_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" + when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload file: path="{{ cassandra_backup_dir }}" state=absent + diff --git a/ansible/roles/gcloud-cli/tasks/main.yml b/ansible/roles/gcloud-cli/tasks/main.yml new file mode 100644 index 0000000000..4e39b7ceaf --- /dev/null +++ b/ansible/roles/gcloud-cli/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Add gcloud signing key + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + +- name: Add gcloud repository into sources list + apt_repository: + repo: "deb https://packages.cloud.google.com/apt cloud-sdk main" + state: present + +- name: Install google cloud cli with specific version and dependent packages + apt: + pkg: + - ca-certificates + - curl + - apt-transport-https + - gnupg + - google-cloud-cli=406.0.0-0 diff --git a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..086cf9c50d --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -0,0 +1,49 @@ +# GCP bucket name +# Example - +# bucket_name: "sunbird-dev-public" +gcp_bucket_name: "" + +# The service account key file +# Example - +# gcp_storage_key_file: "/tmp/gcp.json" +gcp_storage_key_file: "" + +# Folder name in GCP bucket +# Example - +# dest_folder_name: "my-destination-folder" +dest_folder_name: "" + +# The delete pattern to delete files and folder +# Example - +# file_delete_pattern: "my-drectory/*" +# file_delete_pattern: "my-drectory/another-directory/*" +# file_delete_pattern: "*" +file_delete_pattern: "" + +# The path to local file which has to be uploaded to gcloud storage +# The local path to store the file after downloading from gcloud 
storage +# Example - +# local_file_or_folder_path: "/workspace/my-folder/myfile.json" +# local_file_or_folder_path: "/workspace/my-folder" +local_file_or_folder_path: "" + +# The name of the file in gcloud storage after uploading from local path +# The name of the file in gcloud storage that has to be downloaded +# Example - +# dest_file_name: "/myfile-blob.json" +dest_file_name: "" + + +# The folder path in gcloud storage to upload the files starting from the root of the bucket +# This path should start with / if we provide a value for this variable since we are going to append this path as below +# {{ bucket_name }}{{ dest_folder_name }} +# The above translates to "my-bucket/my-folder-path" +# Example - +# dest_folder_path: "/my-folder/json-files-folder" +# This variable can also be empty as shown below, which means we will upload directly at the root path of the bucket +dest_folder_path: "" + +# The local folder path which has to be uploaded to gcloud storage +# Example - +# local_source_folder: "/workspace/my-folder/json-files-folder" +local_source_folder: "" diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml new file mode 100644 index 0000000000..ad0e4449d6 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Delete folder recursively in gcp storage + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/download.yml b/ansible/roles/gcp-cloud-storage/tasks/download.yml new file mode 100644 index 0000000000..c8c6e956ad --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/download.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Download from gcloud 
storage + shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" "{{ local_file_or_folder_path }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml \ No newline at end of file diff --git a/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml b/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml new file mode 100644 index 0000000000..a480bdc275 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml @@ -0,0 +1,14 @@ +--- +- name: create tmp gcp service key file + tempfile: + state: file + suffix: gcp + register: config_key + +- name: Copy service account key file + copy: + content: "{{ gcp_storage_key_file }}" + dest: "{{ config_key.path }}" + +- name: Configure gcloud service account + shell: gcloud auth activate-service-account "{{ gcp_storage_service_account_name }}" --key-file="{{ config_key.path }}" diff --git a/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml b/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml new file mode 100644 index 0000000000..8c26cd0ef0 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml @@ -0,0 +1,8 @@ +- name: Revoke gcloud service account access + shell: gcloud auth revoke "{{ gcp_storage_service_account_name }}" + +- name: Remove key file + file: + path: "{{ config_key.path }}" + state: absent + when: config_key.path is defined diff --git a/ansible/roles/gcp-cloud-storage/tasks/main.yml b/ansible/roles/gcp-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..aa41c090ed --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: upload file to gcloud storage + include: upload.yml + tags: + - file-upload + +- name: upload batch of files to gcloud storage + include: upload-batch.yml + tags: + - upload-batch + +- name: delete batch of files from gcloud storage + include: delete-batch.yml + tags: + - delete-batch + +- name: download a file from gcloud 
storage + include: download.yml + tags: + - file-download \ No newline at end of file diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml new file mode 100644 index 0000000000..49abd5b822 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Upload files from a local directory gcp storage + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload.yml b/ansible/roles/gcp-cloud-storage/tasks/upload.yml new file mode 100644 index 0000000000..2f88d9407f --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Upload to gcloud storage + shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index f358e4f4f3..0b6a9bca4a 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -7,4 +7,4 @@ postgresql_backup_azure_container_name: postgresql-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 81ce384afa..0704d4847f 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -26,5 +26,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_backup_storage }}" + dest_file_name: "{{ postgresql_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index b95eff5751..ec6a40494d 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -16,21 +16,23 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_restore_storage }}" + dest_file_name: "{{ postgresql_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: ensure postgresql 
service is stopped service: name=postgresql state=stopped - name: wait for postgresql to be stopped wait_for: port={{ postgresql_port }} state=stopped -- name: drop cluster - command: pg_dropcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} - become_user: "{{ postgresql_user }}" - ignore_errors: true - -- name: create cluster - command: pg_createcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} - become_user: "{{ postgresql_user }}" - - name: ensure postgresql service is started service: name=postgresql state=started diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index bd034f9bb3..d314ecf925 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -37,7 +37,12 @@ azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name } aws_management_bucket_name: "" # Define the below if you are using Google Cloud -gcs_management_bucket_name: "" +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index cf76c3d66e..bbb1a526b1 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -34,7 +34,8 @@ aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" # Define the below if you are using Google Cloud -gcs_management_bucket_service_account: | +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" # gcloud service account key - refer: 
https://cloud.google.com/iam/docs/creating-managing-service-account-keys # The proxy key and crt values should be padded to the right by a couple of spaces From 2bf8f187e388b5923f613898d5e28edb7218abd8 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Mon, 31 Oct 2022 11:15:29 +0530 Subject: [PATCH 041/203] Update project_credential_template.json --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 230fdccce9..4366d2f82f 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -10,7 +10,7 @@ "id":"did:sunbird:{{osid}}", "issuanceDate": "{{osCreatedAt}}", "credentialSubject": { - "type":"{{certificateLabel}}", + "type":"project", "recipientName": "{{recipient.name}}", "projectName": "{{projectName}}", "projectId": "{{projectId}}", @@ -25,4 +25,4 @@ "name":"{{issuer.name}}", "publicKey":["{{issuer.kid}}"] } - } \ No newline at end of file + } From bab91717d6ed441afc765707a46a9455a675559b Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 31 Oct 2022 13:58:47 +0530 Subject: [PATCH 042/203] issuenceDate change --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 230fdccce9..3c7fdf3e31 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -8,9 +8,9 @@ "VerifiableCredential" ], "id":"did:sunbird:{{osid}}", - "issuanceDate": "{{osCreatedAt}}", + "issuanceDate": "{{completedDate}}", "credentialSubject": { - "type":"{{certificateLabel}}", + "type":"project", 
"recipientName": "{{recipient.name}}", "projectName": "{{projectName}}", "projectId": "{{projectId}}", From d8f60500923be72cf92acb322f95d6531422a3d8 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 31 Oct 2022 15:12:42 +0530 Subject: [PATCH 043/203] project certificate flag env --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 6bf2405d86..d092fbd49b 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -82,3 +82,6 @@ USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:90 ## portal url of env APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} + +# Project certificate enable or disable flag E.g. ON/OFF +PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} From 37b7fa647138ab2ed6d6909abf273f0ff665f64c Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 09:05:22 +0530 Subject: [PATCH 044/203] kid env of certificate issuer added --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index d092fbd49b..6a813f8cb6 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -83,5 +83,8 @@ USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:90 ## portal url of env APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} -# Project certificate enable or disable flag E.g. ON/OFF +# Project certificate enable or disable flag E.g. 
ON/OFF PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} + +# certificate issuer KID value +CERTIFICATE_ISSUER_KID=d50937e1-9359-4451-a66a-ebee45d1d605 \ No newline at end of file From d78c40fbfcb1de9b87c4e6e37422ad0e6da5bac7 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:05:32 +0530 Subject: [PATCH 045/203] credential change --- utils/sunbird-RC/schema/project_credential_template.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 9f1272cfaf..3807365bb4 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -7,6 +7,7 @@ "type": [ "VerifiableCredential" ], +{% raw %} "id":"did:sunbird:{{osid}}", "issuanceDate": "{{completedDate}}", "credentialSubject": { @@ -26,3 +27,4 @@ "publicKey":["{{issuer.kid}}"] } } + {% endraw %} From 370735b55c3d1b03e1e0f7f12b53b9dde8202782 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:12:23 +0530 Subject: [PATCH 046/203] formating change on credential file --- .../schema/project_credential_template.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 3807365bb4..d9a520d5da 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,12 +1,12 @@ { - "@context": [ - "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" + 
"@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" ], - "type": [ - "VerifiableCredential" - ], + "type": [ + "VerifiableCredential" + ], {% raw %} "id":"did:sunbird:{{osid}}", "issuanceDate": "{{completedDate}}", @@ -27,4 +27,4 @@ "publicKey":["{{issuer.kid}}"] } } - {% endraw %} +{% endraw %} From ab685251e3fe0bcfa502ec8c312a6d8c0be8b85c Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:41:23 +0530 Subject: [PATCH 047/203] kid env variable change --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 6a813f8cb6..7b1da9c931 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -87,4 +87,4 @@ APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} # certificate issuer KID value -CERTIFICATE_ISSUER_KID=d50937e1-9359-4451-a66a-ebee45d1d605 \ No newline at end of file +CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} \ No newline at end of file From 8f5f0daa16a251e32f399a61fed8ec492c5fd6fd Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Fri, 4 Nov 2022 11:47:35 +0530 Subject: [PATCH 048/203] Release 5.1.0 - gcp related changes (#3578) --- .gitignore | 2 +- ansible/artifacts-download.yml | 13 ++++- ansible/artifacts-upload.yml | 13 ++++- ansible/assets-upload.yml | 32 +++++++++-- ansible/bootstrap.yml | 13 ++++- 
ansible/deploy-plugins.yml | 55 ++++++++++++++++++- ansible/postgres-managed-service-backup.yml | 4 +- ansible/postgresql-restore.yml | 2 +- .../roles/cassandra-restore/tasks/main.yml | 13 ++++- .../gcp-cloud-storage/tasks/delete-batch.yml | 2 +- ansible/roles/grafana-backup/tasks/main.yml | 11 ++++ .../jenkins-backup-upload/tasks/main.yml | 14 ++++- ansible/roles/mongodb-backup/tasks/main.yml | 11 ++++ .../defaults/main.yml | 0 .../tasks/main.yml | 11 ++++ .../defaults/main.yml | 0 .../tasks/main.yml | 11 ++++ .../roles/prometheus-backup-v2/tasks/main.yml | 11 ++++ .../roles/prometheus-backup/tasks/main.yml | 11 ++++ .../roles/prometheus-restore/tasks/main.yml | 11 ++++ ansible/roles/redis-backup/tasks/main.yml | 11 ++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 1 + .../DataPipeline/jobs/Bootstrap/config.xml | 1 + .../jobs/Bootstrap/config.xml | 1 + .../managed-postgres-backup/Jenkinsfile | 2 +- 25 files changed, 239 insertions(+), 17 deletions(-) rename ansible/roles/{postgres-azure-managed-service-backup => postgres-managed-service-backup}/defaults/main.yml (100%) rename ansible/roles/{postgres-azure-managed-service-backup => postgres-managed-service-backup}/tasks/main.yml (82%) rename ansible/roles/{postgres-azure-managed-service-restore => postgres-managed-service-restore}/defaults/main.yml (100%) rename ansible/roles/{postgres-azure-managed-service-restore => postgres-managed-service-restore}/tasks/main.yml (83%) diff --git a/.gitignore b/.gitignore index 9cbd220cb1..97b376da44 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ .DS_Store *.retry *.pyc -.idea \ No newline at end of file +.idea diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 2872fa1013..cb8230d44b 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -14,4 +14,15 @@ local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ azure_artifact_storage_account_name }}" storage_account_key: "{{ 
azure_artifact_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: download artifact from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" + dest_folder_name: "{{ artifacts_container }}" + dest_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 642a9aa111..52e67448c7 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -15,4 +15,15 @@ local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ azure_artifact_storage_account_name }}" storage_account_key: "{{ azure_artifact_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload artifact to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" + dest_folder_name: "{{ artifacts_container }}" + dest_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index db14234e4a..3809c63722 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -9,7 +9,10 @@ # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: player_cdn_storage: "{{ player_cdn_container }}" + # Azure tasks: + - name: this block consists of tasks related to azure storage + block: - name: set common azure variables set_fact: blob_container_name: "{{ player_cdn_storage }}" @@ -18,13 +21,11 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - when: cloud_service_provider == "azure" - + - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml - when: cloud_service_provider == "azure" - name: upload batch of files to azure storage include_role: @@ -32,4 +33,27 @@ tasks_from: blob-upload-batch.yml vars: local_file_or_folder_path: "{{ assets }}" - when: cloud_service_provider == "azure" + when: cloud_service_provider == "azure" + + #GCP + - name: this block consists of tasks related to azure storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ player_cdn_storage }}" + dest_folder_path: "" + file_delete_pattern: "{{ player_cdn_storage }}/" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + local_file_or_folder_path: "{{ assets }}/*" + when: cloud_service_provider == "gcloud" diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 30d57ca52f..36d9d7b0d0 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -30,6 +30,16 @@ tags: - azure_cli +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: 
+ - role: gcloud-cli + tags: + - gcloud_cli + - hosts: "{{ hosts| default('all') }}" become: yes gather_facts: no @@ -39,4 +49,5 @@ roles: - vm-agents-nodeexporter tags: - - node_exporter \ No newline at end of file + - node_exporter + diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index ef3ea0b44a..bf876b3f66 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -30,7 +30,7 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always - + - block: - name: delete files and folders from azure storage using azcopy include_role: @@ -43,7 +43,7 @@ - collection-editor - generic-editor - preview - + - block: - name: upload batch of files to azure storage include_role: @@ -82,3 +82,54 @@ tags: - plugins when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ plugin_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - always + + - block: + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + vars: + file_delete_pattern: "{{ dest_folder_name }}/{{ folder_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "{{ folder_name }}" + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + dest_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + when: 
cloud_service_provider == "gcloud" + diff --git a/ansible/postgres-managed-service-backup.yml b/ansible/postgres-managed-service-backup.yml index 1a92efb09d..05abaf41c0 100644 --- a/ansible/postgres-managed-service-backup.yml +++ b/ansible/postgres-managed-service-backup.yml @@ -3,6 +3,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: - - postgres-azure-managed-service-backup + - postgres-managed-service-backup tags: - - postgres-azure-managed-service + - postgres-managed-service diff --git a/ansible/postgresql-restore.yml b/ansible/postgresql-restore.yml index e2d80770d1..bcec7447d6 100644 --- a/ansible/postgresql-restore.yml +++ b/ansible/postgresql-restore.yml @@ -3,6 +3,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: - - postgres-azure-managed-service-restore + - postgres-managed-service-restore tags: - postgresql-restore diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 717e2fe113..3b2fc3ae9b 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -17,7 +17,18 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" - + +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ cassandra_backup_storage }}" + dest_file_name: "{{ cassandra_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: unarchieve restore artifact become: true unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml index 
ad0e4449d6..17fe952b16 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Delete folder recursively in gcp storage - shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }" + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }}" async: 3600 poll: 10 diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index c898ada0d5..0f0a44a2b2 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -32,5 +32,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ grafana_backup_storage }}" + dest_file_name: "{{ grafana_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ grafana_backup_dir }}" state=absent diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index d003bed89f..32be77b7a7 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -23,4 +23,16 @@ local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + 
gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ jenkins_backup_storage }}" + dest_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + when: cloud_service_provider == "gcloud" + diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 1eefe6b077..4ae40ecd2b 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -26,6 +26,17 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ mongo_backup_storage }}" + dest_file_name: "{{ mongo_backup_file_name }}.tar.gz" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload file: path={{ mongo_backup_dir }} state=absent diff --git a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-managed-service-backup/defaults/main.yml similarity index 100% rename from ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml rename to ansible/roles/postgres-managed-service-backup/defaults/main.yml diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml similarity index 82% rename from ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml rename to ansible/roles/postgres-managed-service-backup/tasks/main.yml index a8261d91a3..686f4c42f6 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ 
b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -54,5 +54,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_backup_storage }}" + dest_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml similarity index 100% rename from ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml rename to ansible/roles/postgres-managed-service-restore/defaults/main.yml diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml similarity index 83% rename from ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml rename to ansible/roles/postgres-managed-service-restore/tasks/main.yml index 61b1fe3eca..7df51e26b4 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -21,6 +21,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgres_backup_storage }}" + dest_file_name: "{{ postgres_backup_filename }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ 
postgres_backup_filepath }}" + when: cloud_service_provider == "gcloud" + - name: unarchive artifact unarchive: src={{ postgresql_restore_dir }}/{{ postgres_backup_filename }} dest={{ postgresql_restore_dir }}/ copy=no diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 071ed395e1..0cafacb627 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -29,6 +29,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + when: cloud_service_provider == "gcloud" + - name: Deleting snapshot file: path: "{{ prometheus_data_dir }}/snapshots/{{ snapshot_name }}" diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index f9aaa54073..32cffa6e5c 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -38,5 +38,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ prometheus_backup_dir 
}}" state=absent diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 40c9bd9225..843ebe4598 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -14,6 +14,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_filename }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + when: cloud_service_provider == "gcloud" + - name: ensure prometheus is stopped shell: "docker service scale {{prometheus_service_name}}=0 && sleep 10" delegate_to: "{{manager_host}}" #variable is passed as extra vars from jenkins diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index be66ea5292..51f7ab63ff 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -26,6 +26,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ nodebb_redis_backup_storage }}" + dest_file_name: "{{ redis_backup_file_name }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path={{ redis_backup_dir }} state=absent diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml 
b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index 7663c96890..20d7006b52 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml index 39884cf71a..8c82b404e9 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml index 9f26dbf9d9..b3132dfd16 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/pipelines/backup/managed-postgres-backup/Jenkinsfile b/pipelines/backup/managed-postgres-backup/Jenkinsfile index 66acf4baee..f27e665bd5 100644 --- a/pipelines/backup/managed-postgres-backup/Jenkinsfile +++ b/pipelines/backup/managed-postgres-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/postgres-managed-service-backup.yml" - ansibleExtraArgs = "--tags postgres-azure-managed-service --extra-vars 
\"postgres_env=${params.postgres_env}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags postgres-managed-service --extra-vars \"postgres_env=${params.postgres_env}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 4174d6cf925e09a27683da0fe02e84003d14a2ce Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:11:36 +0530 Subject: [PATCH 049/203] Release 5.1.0 gcp related changes (#3580) --- ansible/desktop-faq-upload.yml | 45 ++++++++++++++++++++- ansible/dial_upload-schema.yml | 14 ++++++- ansible/kp_upload-schema.yml | 13 +++++- ansible/plugins.yml | 21 ++++++++++ ansible/roles/cert-templates/tasks/main.yml | 13 +++++- ansible/roles/desktop-deploy/tasks/main.yml | 26 +++++++++++- ansible/uploadFAQs.yml | 13 ++++++ 7 files changed, 140 insertions(+), 5 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 0cdb89a07d..f20f0d7eeb 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -49,4 +49,47 @@ tags: - upload-chatbot-config - upload-batch - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ upload_storage }}" + dest_file_name: "{{ destination_path }}" + dest_folder_path: "{{ destination_path }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + tags: + - always + + - block: + - name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to gcloud storage + include_role: + name: 
gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_private_bucket_name }}" + tags: + - upload-label + + - block: + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "gcloud" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index ba7abf627b..a93a900263 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -37,4 +37,16 @@ local_file_or_folder_path: "dial_schema_template_files" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ dial_plugin_storage }}" + dest_folder_path: "schemas/local" + local_file_or_folder_path: "dial_schema_template_files" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + when: cloud_service_provider == "gcloud" + diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 7d7163437b..a4f6bda83a 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -21,4 +21,15 @@ local_file_or_folder_path: "{{ source_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ plugin_storage }}" + dest_folder_path: "schemas/local" + 
local_file_or_folder_path: "{{ source_name }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 35e34578d0..ab32d9f756 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -33,3 +33,24 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ plugin_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + file_delete_pattern: "content-plugins/{{ plugins_name }}" + dest_folder_path: "/content-plugins/{{ plugins_name }}" + local_file_or_folder_path: "{{ source_file }}" + + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index ee05f2adb3..acecc4d6f4 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -42,4 +42,15 @@ local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" storage_account_name: "{{ azure_private_storage_account_name }}" storage_account_key: "{{ azure_private_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ cert_service_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + gcp_bucket_name: "{{ 
gcloud_private_bucket_name }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index e7763604c1..4ce4da3fb6 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -73,4 +73,28 @@ vars: blob_container_folder_path: "/latest" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ offline_installer_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "gcloud" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 8447fe4e47..52923e1bf4 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -29,6 +29,19 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ upload_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + with_items: + - "{{ source_folder.split(',') }}" + when: 
cloud_service_provider == "gcloud" tags: - upload-faqs - upload-RC-schema From 41445d96423b1de1530a1c65bc0a33f83cae393f Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:03:58 +0530 Subject: [PATCH 050/203] Add GCP related vars for KP and DP (#3586) --- .../ansible/inventory/dev/DataPipeline/common.yml | 9 +++++++++ .../ansible/inventory/dev/DataPipeline/secrets.yml | 4 ++++ .../ansible/inventory/dev/KnowledgePlatform/common.yml | 9 +++++++++ .../ansible/inventory/dev/KnowledgePlatform/secrets.yml | 4 ++++ 4 files changed, 26 insertions(+) diff --git a/private_repo/ansible/inventory/dev/DataPipeline/common.yml b/private_repo/ansible/inventory/dev/DataPipeline/common.yml index 348c74dc6b..ef8432539b 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/common.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/common.yml @@ -1,5 +1,6 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +cloud_service_provider: "" # Your cloud service provider name. 
Supported values are aws, azure, gcloud domain_name: "" # your domain name like example.com # docker hub details dockerhub: "change.docker.url" # docker hub username or url incase of private registry @@ -168,3 +169,11 @@ processing_kafka_overriden_topics: - name: ml.observation.druid retention_time: 86400000 replication_factor: 1 + +# Define the below if you are using Google Cloud +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml index d18a6d1e0e..c37b74d8fe 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml @@ -26,3 +26,7 @@ dp_vault_data_exhaust_token: # slack api token # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so dp_vault_artifacts_container: artifacts + +# Define the below if you are using Google Cloud +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 24f0320615..7f21987f82 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -1,6 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # # docker hub details +cloud_service_provider: "" # Your cloud service provider name. 
Supported values are aws, azure, gcloud dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip domain_name: "" # your domain name like example.com @@ -32,3 +33,11 @@ plugin_container_name: "{{azure_public_container}}" kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins + +# Define the below if you are using Google Cloud +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index fb1af29c0a..ef5db134da 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -18,3 +18,7 @@ lp_vault_youtube_api_key: # youtube api token if you want # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" + +# Define the below if you are using Google Cloud +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" From 1fbc256e18e188bd4a14a943b9d05a461c4950a9 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:03:38 +0530 Subject: [PATCH 051/203] fix: SB-31155 updated references for upstream_url --- ansible/inventory/env/group_vars/all.yml | 3 ++- ansible/roles/stack-proxy/defaults/main.yml | 7 +++++-- ansible/roles/stack-sunbird/defaults/main.yml | 7 
+++++-- .../ansible/roles/helm-daemonset/defaults/main.yml | 7 +++++-- .../ansible/roles/helm-deploy/defaults/main.yml | 7 +++++-- private_repo/ansible/inventory/dev/Core/common.yml | 13 ++++++++++++- 6 files changed, 34 insertions(+), 10 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index df20586566..d0dfe156b1 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -36,7 +36,8 @@ sunbird_keycloak_required_action_link_expiration_seconds: 2592000 sunbird_es_port: 9300 mail_server_port: 587 -upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" +# SB-31155 +#upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Learner sunbird_user_profile_field_default_visibility: private diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml index 579709e412..dd4bda152d 100644 --- a/ansible/roles/stack-proxy/defaults/main.yml +++ b/ansible/roles/stack-proxy/defaults/main.yml @@ -39,6 +39,9 @@ ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" kibana_service: "{{swarm_dashboard}}:5601" -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 3b5946e333..8936a190ec 100644 --- 
a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -212,8 +212,11 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" # Override this dictionary in your common.yaml proxy: diff --git a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml index 59eb136773..9822dd5626 100644 --- a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml @@ -33,6 +33,9 @@ ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index ed2c7f5aca..bf1fed2ff7 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ 
b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,8 +32,11 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index d314ecf925..1b32d003e7 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -44,6 +44,14 @@ gcloud_management_bucket_name: "" gcloud_private_bucket_projectId: "" +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# upstream_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }}/{{ content_storage }} +# AWS +# upstream_url: # Geetha to fill this url based on AWS role vars +# Azure +upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public + # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -70,6 +78,7 @@ sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml sunbird_content_azure_storage_container: contents # Azure 
container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml + # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, # # From: SBSMS @@ -115,9 +124,11 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" +# Generic variable for any cloud provider +content_storage: "{{ sunbird_content_azure_storage_container }}" + # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Proxy url to get /assets/public plugin_upstream_url: "{{upstream_url}}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" From 32d2c59e0d8e7fbef7867238c72a0c384bdd7c27 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:09:03 +0530 Subject: [PATCH 052/203] fix: adding https in url Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 1b32d003e7..650e04c0e4 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -50,7 +50,7 @@ gcloud_private_bucket_projectId: "" # AWS # upstream_url: # Geetha to fill this url based on AWS role vars # Azure -upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public +upstream_url: "https://{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public # 
------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly From 079c554336373f2d3d67dbc69f2f544fa9c5926c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:10:10 +0530 Subject: [PATCH 053/203] fix: updated sunbird_cloud_storage_urls var to use upstream_url Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index d0dfe156b1..4b0975b4b2 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -364,7 +364,7 @@ content_service_blacklisted_resourcetype: '' content_service_whitelisted_resourcetype: '' content_service_whitelisted_mimetype: '' content_service_blacklisted_mimetype: '' -sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/' +sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/' sunbird_email_max_recipients_limit: 100 sunbird_cassandra_consistency_level: local_quorum sunbird_cassandra_replication_strategy: '{"class":"SimpleStrategy","replication_factor":"1"}' From 6664891531d8ecf999abed6da51ef8c547b68367 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:26:26 +0530 Subject: [PATCH 054/203] fix: use cloud_storage_url var as a base for upstream_url --- ansible/inventory/env/group_vars/all.yml | 4 +++- ansible/roles/stack-sunbird/defaults/main.yml | 3 ++- private_repo/ansible/inventory/dev/Core/common.yml | 7 ++++--- 3 files 
changed, 9 insertions(+), 5 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 4b0975b4b2..96337704c7 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -509,7 +509,9 @@ sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars #sunbird_offline_azure_storage_account: "" #added this var for adopter usecase offline_installer_container_name: "{{env}}-offlineinstaller" -cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" + +# SB-31155 +#cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # Search-service search_index_host: "{{ groups['composite-search-cluster']|join(':9200,')}}:9200" diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 8936a190ec..6196962b3e 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -78,7 +78,8 @@ sunbird_portal_cdn_url: sunbird_dataservice_url: sunbird_background_actor_port: sunbird_app_url: -sunbird_image_storage_url: +# SB-31155 +#sunbird_image_storage_url: sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 650e04c0e4..811677d8c1 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -46,11 +46,11 @@ gcloud_private_bucket_projectId: "" # Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) # GCP -# upstream_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }}/{{ content_storage }} +# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} # AWS -# upstream_url: # Geetha to fill this url 
based on AWS role vars +# cloud_storage_url: # Geetha to fill this url based on AWS role vars # Azure -upstream_url: "https://{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public +cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -125,6 +125,7 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" # Generic variable for any cloud provider +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Proxy url to get /assets/public content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars From edc608e04a049a30a5abfe10e0539f34aed0574e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:33:37 +0530 Subject: [PATCH 055/203] fix: generalizing dial_service_schema_base_path var Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 96337704c7..c27674492f 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -556,5 +556,9 @@ sunbird_trampoline_android_keycloak_client_id: trampoline-android sunbird_trampoline_desktop_keycloak_client_id: trampoline-desktop # DIAL-service schema +# SB-31155 - This should be deprecated in future in favour of dial_plugin_storage dial_plugin_container_name: "sunbird-dial-{{env}}" -dial_service_schema_base_path: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{dial_plugin_container_name}}/jsonld-schema/local" + +# SB-31155 - Adding a generialzed variable which can be used for any CSP +dial_plugin_storage: "{{ 
dial_plugin_container_name }}" +dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" From 2e9403e531e09cb76cc7190c3ca12a3b1fc1cc0c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:37:20 +0530 Subject: [PATCH 056/203] fix: generalized ml_analytics_evidence_base_url var Signed-off-by: Keshav Prasad --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 5c5d87dace..b3ede22f5d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -10,8 +10,14 @@ ml_analytics_survey_app_name: "{{ ml_survey_app_name | default('sunbirdsurvey') ml_analytics_integrated_app_name: "{{ ml_integrated_app_name | default('sunbird') }}" ml_analytics_integrated_portal: "{{ ml_integrated_portal | default('dev.sunbird.portal') }}" ml_analytics_survey_service: "http://{{private_ingressgateway_ip}}/ml-survey" + +# SB-31155 +# This should be deprecated in future in favour of ml_analytics_public_storage ml_analytics_public_container: "{{ ml_analytics_container | default('samiksha') }}" -ml_analytics_evidence_base_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ ml_analytics_public_container }}/" + +# SB-31155 - Adding a generialzed variable which can be used for any CSP +ml_analytics_public_storage: "{{ ml_analytics_public_container }}" +ml_analytics_evidence_base_url: "{{ cloud_storage_url }}/{{ ml_analytics_public_storage }}/" ml_analytics_mongodb_url: "{{ml_mongodb_host | default(groups['mongo_master'][0]+':27017')}}" ml_analytics_mongo_db_name: "{{ml_mongodb | default('ml-survey')}}" ml_analytics_mongo_observation_submission_collection: "observationSubmissions" From 8072b863cfd82b4083c2724064b90d2d6ddcec45 Mon Sep 17 00:00:00 2001 From: 
Keshav Prasad Date: Mon, 14 Nov 2022 14:51:46 +0530 Subject: [PATCH 057/203] fix: removed unnessary vars and redefine in all.yml Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 9 ++++++++- ansible/roles/stack-proxy/defaults/main.yml | 9 +-------- ansible/roles/stack-sunbird/defaults/main.yml | 8 -------- .../templates/sunbird_knowledge-mw-service.env | 1 - .../ansible/roles/helm-daemonset/defaults/main.yml | 7 ------- kubernetes/ansible/roles/helm-deploy/defaults/main.yml | 6 ------ 6 files changed, 9 insertions(+), 31 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index c27674492f..f73d598ce2 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -376,7 +376,6 @@ sunbird_otp_length: 6 sunbird_help_link_visibility: FALSE # not required -sunbird_image_storage_url: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/dial/" vault_auth_key: "{{core_vault_auth_key}}" vault_sender: "{{core_vault_sender}}" vault_country: "{{core_vault_country}}" @@ -508,8 +507,16 @@ sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars #sunbird_offline_azure_storage_account: "" #added this var for adopter usecase + +# SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" +# SB-31155 - Adding a generialzed variable which can be used for any CSP +offline_installer_storage: "{{ offline_installer_container_name }}" + +# SB-31155 - Removed multiple declarations and moved here +sunbird_offline_azure_storage_account_url: "{{ cloud_storage_url }}/{{ offline_installer_storage }}" + # SB-31155 #cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml index dd4bda152d..ab1524cf72 100644 --- 
a/ansible/roles/stack-proxy/defaults/main.yml +++ b/ansible/roles/stack-proxy/defaults/main.yml @@ -37,11 +37,4 @@ prometheus_route_prefix: prometheus prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -kibana_service: "{{swarm_dashboard}}:5601" - -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" +kibana_service: "{{swarm_dashboard}}:5601" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 6196962b3e..97ffbc1584 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -78,8 +78,6 @@ sunbird_portal_cdn_url: sunbird_dataservice_url: sunbird_background_actor_port: sunbird_app_url: -# SB-31155 -#sunbird_image_storage_url: sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: @@ -213,12 +211,6 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" # Override this dictionary in your common.yaml proxy: # repository: 'proxy' diff --git a/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env 
b/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env index 6a13ddadfd..c7b0533c2a 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env @@ -7,7 +7,6 @@ sunbird_keycloak_client_id={{sunbird_keycloak_client_id}} sunbird_keycloak_public={{sunbird_keycloak_public}} sunbird_cache_store={{sunbird_cache_store}} sunbird_cache_ttl={{sunbird_cache_ttl}} -sunbird_image_storage_url={{sunbird_image_storage_url}} sunbird_azure_account_name={{sunbird_public_storage_account_name}} sunbird_azure_account_key={{sunbird_public_storage_account_key}} sunbird_dial_code_registry_url=https://{{proxy_server_name}}/dial/ diff --git a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml index 9822dd5626..511f5c2acd 100644 --- a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml @@ -32,10 +32,3 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: - -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index bf1fed2ff7..2e19f88194 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,13 +32,7 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -# SB-31155 -#upstream_url: 
"ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" proxy_cache_path: large_cache: From 52539a42dc62fc8baf76f7d9054b5153402b4f60 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:57:28 +0530 Subject: [PATCH 058/203] fix: updated desktop_app_storage_url var reference Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 97ffbc1584..8b600f04a1 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -82,7 +82,7 @@ sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: sunbird_env_logo_url: -desktop_app_storage_url: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{offline_installer_container_name}}" +desktop_app_storage_url: "{{ sunbird_offline_azure_storage_account_url }}" telemetry_logstash_heap_size: 512m telemetry_logstash_replicas: 1 From c40dc026dbc5c4e7cbc89e141452cf7d53d6707a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:03:13 +0530 Subject: [PATCH 059/203] fix: generalized h5p_library_path var Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 3 +++ .../stack-sunbird/templates/content-service_application.conf | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index f73d598ce2..f1ed8628f2 100644 --- 
a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -569,3 +569,6 @@ dial_plugin_container_name: "sunbird-dial-{{env}}" # SB-31155 - Adding a generialzed variable which can be used for any CSP dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" + +# SB-31155 - Moved to the installation public container for now (same place where keycloaka and java artifacts are stored) +h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index d33dbecf6f..3fa9ba9a99 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -382,7 +382,7 @@ content { } h5p { library { - path: "{{ h5p_library_path | default('https://sunbirddev.blob.core.windows.net/sunbird-content-dev/h5p-standalone-1.3.4.zip') }}" + path: "{{ h5p_library_path }}" } } copy { From f6fa71b055c503c3b1a3792ce23d1727afa73790 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:14:37 +0530 Subject: [PATCH 060/203] fix: updated player.env to use generalized var Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 0d1c34f45b..2afd3af54c 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -162,7 +162,7 @@ sunbird_base_proto={{sunbird_base_proto | default(proto)}} ml_survey_url={{ml_survey_url|default(proto+'://' +domain_name) 
}} #Release-4.1.0 -sunbird_azure_storage_account_name=https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/ +sunbird_azure_storage_account_name={{ cloud_storage_url }}/ #Release-4.1.0 sunbird_google_oauth_ios_clientId={{sunbird_google_oauth_ios_clientId | default("")}} From cdb0b41cac68f4a370017150ed8cbe8f4a479007 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:18:21 +0530 Subject: [PATCH 061/203] fix: generalized discussion_upstream_url Signed-off-by: Keshav Prasad --- kubernetes/ansible/roles/helm-deploy/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index 2e19f88194..6c5c925747 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,7 +32,7 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +discussion_upstream_url: "{{ cloud_storage_url }}" proxy_cache_path: large_cache: From f409d6f1eba6f7ca1ef034749aa12c85220b34f7 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:23:53 +0530 Subject: [PATCH 062/203] fix: generalizing CACHE_CONTEXT_URLS Signed-off-by: Keshav Prasad --- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 9895ebf05c..62c9114a76 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -35,4 +35,4 @@ certificatesignenv: CERTIFICATE_PRIVATE_KEY: |- {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} SIGNING_KEY_TYPE: "{{ 
SIGNING_KEY_TYPE|default('RSA')}}" - CACHE_CONTEXT_URLS: "{{ cache_context_urls | default('https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/v1_context.json,https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/sunbird_context.json,https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/credential_template.json')}}" \ No newline at end of file + CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,' + upstream_url + '/schema/sunbird_context.json,' + upstream_url + '/schema/credential_template.json')}}" \ No newline at end of file From ebaa1771e20bdcde6b879355c9d70a47a24ef198 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:25:28 +0530 Subject: [PATCH 063/203] fix: generalizing credentialTemplate param Signed-off-by: Keshav Prasad --- .../sunbird-RC/registry/schemas/ProjectCertificate.json | 2 +- .../sunbird-RC/registry/schemas/TrainingCertificate.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 98f3bb91e9..0035464abb 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -70,6 +70,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_credential_template.json" + "credentialTemplate": "{{ upstream_url }}/schema/project_credential_template.json" } } \ No newline at end of file diff --git
a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json index 2906929f2d..5187b08e81 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json @@ -69,6 +69,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{upstream_url}}/schema/credential_template.json" + "credentialTemplate": "{{ upstream_url }}/schema/credential_template.json" } } From 68111485f45ae603223a68f2c07f655a97dcdf1a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:34:32 +0530 Subject: [PATCH 064/203] fix: generalizing kp_schema_base_path and updating reference to upstream_url Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 -- .../ansible/inventory/dev/Core/common.yml | 4 ++-- .../inventory/dev/KnowledgePlatform/common.yml | 17 +++++++++++++++-- .../sunbird-RC/schema/credential_template.json | 4 ++-- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index f1ed8628f2..238ab4dd65 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -36,8 +36,6 @@ sunbird_keycloak_required_action_link_expiration_seconds: 2592000 sunbird_es_port: 9300 mail_server_port: 587 -# SB-31155 -#upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Learner sunbird_user_profile_field_default_visibility: private diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 811677d8c1..594a415e25 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ 
b/private_repo/ansible/inventory/dev/Core/common.yml @@ -130,11 +130,11 @@ content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -plugin_upstream_url: "{{upstream_url}}" +plugin_upstream_url: "{{ upstream_url }}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" plugin_container_name: "{{sunbird_content_azure_storage_container}}" -kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" +kp_schema_base_path: "{{ upstream_url }}/schemas/local" keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 7f21987f82..94df144c58 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -29,8 +29,21 @@ neo4j_enterprise: false # Set this to true if you use # Sensible defaults which you need not change - But if you would like to change, you are free to do so ekstep_domain_name: "{{ proto }}://{{ domain_name }}" artifacts_container: artifacts -plugin_container_name: "{{azure_public_container}}" -kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} +# AWS +# cloud_storage_url: # Geetha to fill this url based on AWS role vars +# Azure +cloud_storage_url: "https://{{ sunbird_public_storage_account_name 
}}.blob.core.windows.net" + +plugin_container_name: "{{ azure_public_container }}" + +# Generic variable for any cloud provider +plugin_storage: "{{ plugin_container_name }}" + +kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index 8f467986cc..f96a3c0528 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,7 +1,7 @@ { "@context": [ - "https://{{upstream_url}}/schema/v1_context.json", - "https://{{upstream_url}}/schema/sunbird_context.json" + "{{ upstream_url }}/schema/v1_context.json", + "{{ upstream_url }}/schema/sunbird_context.json" ], "type": [ "VerifiableCredential" From 511382e55312e0cb7c87977f81873524c2d3e87e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:36:45 +0530 Subject: [PATCH 065/203] fix: updated references of templates to upstream_url Signed-off-by: Keshav Prasad --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index d9a520d5da..6041f69d41 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,8 +1,8 @@ { "@context": [ "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" + "{{ 
upstream_url }}/schema/project_v1_context.json", + "{{ upstream_url }}/schema/project_sunbird_context.json" ], "type": [ "VerifiableCredential" From dec5289aed91445edddde3a235b29b2e8d0a9410 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Mon, 14 Nov 2022 16:04:05 +0530 Subject: [PATCH 066/203] sunbird cdn configuration --- ansible/roles/stack-sunbird/templates/inbound.env | 13 ++++++++++--- .../roles/stack-sunbird/templates/transformer.env | 14 +++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index c8ed1a5157..c9bc2033a8 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -55,12 +55,19 @@ REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} #Azure Config AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_key}} -SELECTED_FILE_CDN=azure +AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} +AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} #Netcore NETCORE_WHATSAPP_AUTH_TOKEN={{uci_netcore_whatsapp_token}} NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipost.com/api/v2/')}} +#Sunbird CDN Configuration +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} + +#Selected CDN Configuration +SELECTED_FILE_CDN=sunbird diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index 
fd2e6d00aa..f5c2f7b3a5 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -72,6 +72,14 @@ POSTHOG_EVENT_ENABLED=FALSE #Azure Config AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_key}} -SELECTED_FILE_CDN=azure \ No newline at end of file +AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} +AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} + +#Sunbird CDN Configuration +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} + +#Selected CDN Configuration +SELECTED_FILE_CDN=sunbird \ No newline at end of file From 0dbae8510053debc98081a5a0e4c8a34848b028f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:13:25 +0530 Subject: [PATCH 067/203] fix: remove unnessary vars Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 8 -------- .../stack-sunbird/templates/sunbird_learner-service.env | 1 - .../roles/stack-sunbird/templates/sunbird_lms-service.env | 1 - private_repo/ansible/inventory/dev/Core/common.yml | 2 +- 4 files changed, 1 insertion(+), 11 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 238ab4dd65..9fe037507c 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -408,8 +408,6 @@ sunbird_health_check_enable: 'true' ## Release 1.15 ## sunbird_keycloak_user_federation_provider_id: "{{core_vault_sunbird_keycloak_user_federation_provider_id}}" -# Learner-service 
-sunbird_course_metrics_base_url: https://{{sunbird_private_storage_account_name}}.blob.core.windows.net/ sunbird_gzip_size_threshold: 262144 prometheus_mount_point: "/root/dockerdata/prometheus/data/" @@ -503,9 +501,6 @@ content_import_remove_props: '["downloadUrl","variants","previewUrl","streamingU #Sunbird-Portal release-2.6.5 # sunbird_portal_updateLoginTimeEnabled: false -# Desktop app vars -#sunbird_offline_azure_storage_account: "" #added this var for adopter usecase - # SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" @@ -515,9 +510,6 @@ offline_installer_storage: "{{ offline_installer_container_name }}" # SB-31155 - Removed multiple declarations and moved here sunbird_offline_azure_storage_account_url: "{{ cloud_storage_url }}/{{ offline_installer_storage }}" -# SB-31155 -#cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" - # Search-service search_index_host: "{{ groups['composite-search-cluster']|join(':9200,')}}:9200" compositesearch_index_name: "compositesearch" diff --git a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env index 79d0bfe1e3..3b6a3f122e 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env @@ -86,7 +86,6 @@ sunbird_time_zone={{sunbird_time_zone}} sunbird_health_check_enable={{sunbird_health_check_enable}} sunbird_keycloak_user_federation_provider_id={{core_vault_sunbird_keycloak_user_federation_provider_id}} sunbird_gzip_enable={{sunbird_gzip_enable}} -sunbird_course_metrics_base_url={{sunbird_course_metrics_base_url}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} 
sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 6b790eb735..1b3fdba3ca 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -87,7 +87,6 @@ sunbird_time_zone={{sunbird_time_zone}} sunbird_health_check_enable={{sunbird_health_check_enable}} sunbird_keycloak_user_federation_provider_id={{core_vault_sunbird_keycloak_user_federation_provider_id}} sunbird_gzip_enable={{sunbird_gzip_enable}} -sunbird_course_metrics_base_url={{sunbird_course_metrics_base_url}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 594a415e25..b73c466a45 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -125,7 +125,7 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" # Generic variable for any cloud provider -upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Proxy url to get /assets/public +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars From 52004154e58728c131ff6dcfa8e85693df037ec8 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:20:22 +0530 Subject: [PATCH 068/203] fix: reordered comments Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 7 +++++-- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git 
a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index b73c466a45..781d798112 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -77,8 +77,13 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml +# SB-31155 - This should be deprecated in future in favour of content_storage sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml +# SB-31155 - Adding a generialzed variable which can be used for any CSP +content_storage: "{{ sunbird_content_azure_storage_container }}" + + # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. 
For example, # # From: SBSMS @@ -124,9 +129,7 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" -# Generic variable for any cloud provider upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" -content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 94df144c58..e4fdbf2b14 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -38,9 +38,10 @@ artifacts_container: artifacts # Azure cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +# SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# Generic variable for any cloud provider +# SB-31155 - Adding a generialzed variable which can be used for any CSP plugin_storage: "{{ plugin_container_name }}" kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" From d94747a96b9b173b03c216fa351543c0952b7302 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:26:07 +0530 Subject: [PATCH 069/203] fix: typo fix Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 4 ++-- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- private_repo/ansible/inventory/dev/Core/common.yml | 2 +- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 9fe037507c..9e268e1168 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -504,7 +504,7 @@ 
sunbird_portal_updateLoginTimeEnabled: false # SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP offline_installer_storage: "{{ offline_installer_container_name }}" # SB-31155 - Removed multiple declarations and moved here @@ -556,7 +556,7 @@ sunbird_trampoline_desktop_keycloak_client_id: trampoline-desktop # SB-31155 - This should be deprecated in future in favour of dial_plugin_storage dial_plugin_container_name: "sunbird-dial-{{env}}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index b3ede22f5d..fd5e62c5f4 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -15,7 +15,7 @@ ml_analytics_survey_service: "http://{{private_ingressgateway_ip}}/ml-survey" # This should be deprecated in future in favour of ml_analytics_public_storage ml_analytics_public_container: "{{ ml_analytics_container | default('samiksha') }}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP ml_analytics_public_storage: "{{ ml_analytics_public_container }}" ml_analytics_evidence_base_url: "{{ cloud_storage_url }}/{{ ml_analytics_public_storage }}/" ml_analytics_mongodb_url: "{{ml_mongodb_host | default(groups['mongo_master'][0]+':27017')}}" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml 
b/private_repo/ansible/inventory/dev/Core/common.yml index 781d798112..831a444922 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -80,7 +80,7 @@ environment_id: "10000003" # A 8 digit number fo # SB-31155 - This should be deprecated in future in favour of content_storage sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP content_storage: "{{ sunbird_content_azure_storage_container }}" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index e4fdbf2b14..831eaf7c4a 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -41,7 +41,7 @@ cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core. 
# SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP plugin_storage: "{{ plugin_container_name }}" kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" From 0a7856c7144da6b2f763e79717ac36ff46b7b686 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:35:00 +0530 Subject: [PATCH 070/203] fix: remove duplicate vars Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 9e268e1168..c479f6693b 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -85,7 +85,6 @@ keycloak_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres ser kong_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres server uci_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres server sunbird_cassandra_host: "{{groups['cassandra']|join(',')}}" #Private IP of Cassandra server -sunbird_es_host: "{{groups['es']| join(',')}}" ## Application server configurations sunbird_analytics_api_base_url: "http://analytics-service.{{namespace}}.svc.cluster.local:9000" @@ -339,13 +338,11 @@ kong__test_jwt: "{{ core_vault_sunbird_api_auth_token }}" ####### App ES ######## app_es_etc_cluster_name: "{{env}}" app_es_etc_discovery_zen_minimum_master_nodes: "{{groups['es']| length | int}}" -app_es_snapshot_host: "{{ groups['es'][0] }}" app_es_restore_host: "{{ groups['es'][0] }}" app_es_snapshot_base_path: application #######Log Es log_es_etc_cluster_name: "{{env}}-log" -log_es_snapshot_host: "{{ groups['log-es'][0] }}" log_es_restore_host: "{{ groups['log-es'][0] }}" log_es_host: "{{ groups['log-es'][0] }}" 
From 4867b294f19eae2df99bfb33a8541e74f2ce6926 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 18:13:25 +0530 Subject: [PATCH 071/203] fix: moving few vars to default Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 7 ++++++- private_repo/ansible/inventory/dev/Core/common.yml | 8 +------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index c479f6693b..1aaa166d5c 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -557,5 +557,10 @@ dial_plugin_container_name: "sunbird-dial-{{env}}" dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" +# SB-31155 - Moving few vars from private repo template to here +content_storage: "{{ sunbird_content_azure_storage_container }}" +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" +plugin_upstream_url: "{{ upstream_url }}" +kp_schema_base_path: "{{ upstream_url }}/schemas/local" # SB-31155 - Moved to the installation public container for now (same place where keycloaka and java artifacts are stored) -h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" \ No newline at end of file +h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 831a444922..286c957102 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -77,12 +77,9 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP 
common.yml -# SB-31155 - This should be deprecated in future in favour of content_storage +# SB-31155 - This should be deprecated in future in favour of content_storage defined in all.yml sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml -# SB-31155 - Adding a generalized variable which can be used for any CSP -content_storage: "{{ sunbird_content_azure_storage_container }}" - # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, # @@ -129,15 +126,12 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" -upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -plugin_upstream_url: "{{ upstream_url }}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" plugin_container_name: "{{sunbird_content_azure_storage_container}}" -kp_schema_base_path: "{{ upstream_url }}/schemas/local" keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" From 996c091b92d728a2926b9147767ee103d87296b4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 15 Nov 2022 16:45:27 +0530 Subject: [PATCH 072/203] fix: moved var to all.yml of LP repo Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 831eaf7c4a..b905d7b359 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ 
b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -41,9 +41,6 @@ cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core. # SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# SB-31155 - Adding a generalized variable which can be used for any CSP -plugin_storage: "{{ plugin_container_name }}" - kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins From f55ab75992bea5ca7ab7b9c15854103f307df1f5 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 Date: Wed, 16 Nov 2022 11:30:18 +0530 Subject: [PATCH 073/203] Add few new variables --- ansible/roles/ml-analytics-service/defaults/main.yml | 5 +++++ ansible/roles/ml-analytics-service/templates/config.j2 | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 5c5d87dace..f95a395b7d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -99,3 +99,8 @@ ml_analytics_druid_observation_batch_ingestion_spec: '{"type":"index","spec":{"i ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion" ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" +ml_analytics_druid_survey_query_spec : 
'{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' +survey_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' +ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" +ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" +ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index b4945675d3..cef1739e4e 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ 
b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -106,6 +106,10 @@ observation_query_spec = {{ ml_analytics_druid_observation_query_spec }} observation_injestion_spec = {{ml_analytics_druid_observation_batch_ingestion_spec}} +survey_query_spec = {{ml_analytics_druid_survey_query_spec}} + +survey_injestion_spec = {{ml_analytics_druid_survey_batch_ingestion_spec}} + [KAFKA] url = {{ ml_analytics_kafka_url }} @@ -188,6 +192,8 @@ projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +survey_batch_ingestion_data_del = {{ml_analytics_survey_batchupdate_azure_blob_path}} + [REDIS] host = {{ ml_analytics_redis_host }} @@ -224,6 +230,10 @@ observation_sub_ids = {{ ml_analytics_observation_submission_id_filepath }} observation_druid_data = {{ ml_analytics_observation_batchupdate_output_dir }} +survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} + +survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} + [CLOUD_STORAGE] service_name = {{ ml_analytics_AWS_service_name }} From e4628c7947ba160d429e9e3c893fd63c16d7cc92 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 Date: Wed, 16 Nov 2022 11:33:51 +0530 Subject: [PATCH 074/203] Update key --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index f95a395b7d..6bcb9616e9 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -100,7 +100,7 @@ ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" 
ml_analytics_druid_survey_query_spec : '{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' -survey_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' +ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" From f870b051d010ca60e089f0312ad2c57f36a1fc64 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Wed, 16 Nov 2022 13:09:48 +0530 Subject: [PATCH 075/203] removed unused variables --- ansible/roles/stack-sunbird/templates/inbound.env | 5 ----- 
ansible/roles/stack-sunbird/templates/transformer.env | 5 ----- 2 files changed, 10 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index c9bc2033a8..331ae1d3fc 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -53,11 +53,6 @@ REDIS_PASS={{sunbird_redis_pass | default('')}} REDIS_PORT={{sunbird_redis_port | default(6379)}} REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} -#Azure Config -AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} - #Netcore NETCORE_WHATSAPP_AUTH_TOKEN={{uci_netcore_whatsapp_token}} NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index f5c2f7b3a5..b5be5c4451 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -70,11 +70,6 @@ REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} EXHAUST_TELEMETRY_ENABLED=TRUE POSTHOG_EVENT_ENABLED=FALSE -#Azure Config -AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} - #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} From 3a25d5bde548cb3ad03c9d67c81752a543364b3b Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 16 Nov 2022 14:03:55 +0530 Subject: [PATCH 076/203] feat: SB-30654 generalizing plugins upload Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 23 ++++++++++++++++------- 
pipelines/deploy/CEPlugins/Jenkinsfile | 3 +-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index bf876b3f66..b7f233af67 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,13 +72,22 @@ - preview - block: - - name: run the az_copy.sh script - shell: "bash {{ az_file_path }} {{ plugin_storage }} {{ source_file }}" - async: 3600 - poll: 10 - environment: - AZURE_STORAGE_ACCOUNT: "{{ azure_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ azure_public_storage_account_sas }}" + - name: delete batch of files from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-delete-batch.yml + vars: + blob_delete_pattern: "content-plugins/{{ item }}" + with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/content-plugins" + local_file_or_folder_path: "{{ source_file }}/{{ item }}" + with_lines: "cat {{ plugin_list_to_delete_and_upload }}" tags: - plugins when: cloud_service_provider == "azure" diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index fea1e80819..1d026ac576 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -31,11 +31,10 @@ node() { sh """ unzip ${artifact} unzip content-plugins.zip - chmod a+x content-plugins/az_copy.sh mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins 
plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugin_list_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From be05234a5cb2b7165251751460c6de9c1e84a03e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 16 Nov 2022 18:55:45 +0530 Subject: [PATCH 077/203] feat: upload plugins using existing roles Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 12 +++++++----- ansible/roles/azure-cloud-storage/defaults/main.yml | 7 ++++++- .../azure-cloud-storage/tasks/blob-delete-batch.yml | 2 +- .../azure-cloud-storage/tasks/blob-upload-batch.yml | 3 ++- .../azure-cloud-storage/tasks/container-create.yml | 4 ++-- pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- 6 files changed, 19 insertions(+), 11 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index b7f233af67..357baef98e 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -30,6 +30,7 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always + no_log: True - block: - name: delete files and folders from azure storage using azcopy @@ -77,17 +78,18 @@ name: azure-cloud-storage tasks_from: blob-delete-batch.yml vars: - blob_delete_pattern: "content-plugins/{{ item }}" - with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + blob_delete_pattern: "content-plugins/{{ item }}/*" + with_lines: cat {{ plugin_list_to_delete_and_upload }} - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_folder_path: "/content-plugins" - local_file_or_folder_path: "{{ source_file }}/{{ item }}" - with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + blob_container_folder_path: "/content-plugins/{{ item }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item }}" + create_container: 
false + with_lines: cat {{ plugin_list_to_delete_and_upload }} tags: - plugins when: cloud_service_provider == "azure" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 0e4e45bf95..8f6673d3c9 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -64,4 +64,9 @@ blob_container_folder_path: "" # This variable affects only new containers and has no affect on a container if it already exists # If the container already exists, the access level will not be changed # You will need to change the access level from Azure portal or using az storage container set-permission command -container_public_access: "" \ No newline at end of file +container_public_access: "" + +# Create the container by default before running the specific azure tasks +# If we would like to skip container creation (in case of a looped execution), you can set this value to false +# in order to skip the task for every iteration +create_container: true \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index 4e8ad68a2d..e642a6f24f 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- -- name: delete files and folders from a blob container recursively +- name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 
3043da46cc..8f10576cb5 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,8 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is true -- name: upload files and folders from a local directory to azure storage container +- name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/container-create.yml b/ansible/roles/azure-cloud-storage/tasks/container-create.yml index 419510cc19..847c765a33 100644 --- a/ansible/roles/azure-cloud-storage/tasks/container-create.yml +++ b/ansible/roles/azure-cloud-storage/tasks/container-create.yml @@ -1,8 +1,8 @@ --- -- name: create container in azure storage if it doesn't exist +- name: create container if it doesn't exist shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" when: storage_account_key | length > 0 -- name: create container in azure storage if it doesn't exist +- name: create container if it doesn't exist shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --sas-token '{{ storage_account_sas_token }}'" when: storage_account_sas_token | length > 0 \ No newline at end of file diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index 1d026ac576..078069bbe0 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ 
b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -34,7 +34,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugin_list_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From f436573fcf8d07c1c67c4f099c6a01b446c3ed06 Mon Sep 17 00:00:00 2001 From: G33tha Date: Wed, 16 Nov 2022 20:18:21 +0530 Subject: [PATCH 078/203] grouping aws specific tasks into a single role (#3573) --- ansible/artifacts-download.yml | 13 ++++ ansible/artifacts-upload.yml | 13 ++++ ansible/assets-upload.yml | 24 +++++++ ansible/bootstrap.yml | 10 +++ ansible/deploy-plugins.yml | 64 +++++++++++++++++++ ansible/desktop-faq-upload.yml | 51 +++++++++++++++ ansible/dial_upload-schema.yml | 13 ++++ ansible/kp_upload-schema.yml | 13 ++++ ansible/roles/aws-cli/defaults/main.yml | 1 + ansible/roles/aws-cli/tasks/main.yml | 24 +++++++ .../roles/aws-cloud-storage/defaults/main.yml | 3 + .../aws-cloud-storage/tasks/delete-folder.yml | 9 +++ .../roles/aws-cloud-storage/tasks/delete.yml | 9 +++ .../aws-cloud-storage/tasks/download.yml | 9 +++ .../roles/aws-cloud-storage/tasks/main.yml | 18 ++++++ .../aws-cloud-storage/tasks/upload-folder.yml | 9 +++ .../roles/aws-cloud-storage/tasks/upload.yml | 9 +++ ansible/roles/cassandra-backup/tasks/main.yml | 13 ++++ .../roles/cassandra-restore/tasks/main.yml | 14 ++++ ansible/roles/cert-templates/tasks/main.yml | 13 ++++ 
ansible/roles/desktop-deploy/tasks/main.yml | 28 ++++++++ ansible/roles/grafana-backup/tasks/main.yml | 13 ++++ .../jenkins-backup-upload/tasks/main.yml | 13 ++++ ansible/roles/mongodb-backup/tasks/main.yml | 13 ++++ .../tasks/main.yml | 13 ++++ .../tasks/main.yml | 13 ++++ .../roles/postgresql-backup/tasks/main.yml | 13 ++++ .../roles/postgresql-restore/tasks/main.yml | 13 ++++ .../roles/prometheus-backup-v2/tasks/main.yml | 13 ++++ .../roles/prometheus-backup/tasks/main.yml | 13 ++++ .../roles/prometheus-restore/tasks/main.yml | 13 ++++ ansible/roles/redis-backup/tasks/main.yml | 13 ++++ ansible/uploadFAQs.yml | 15 +++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 1 + pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- .../ansible/inventory/dev/Core/common.yml | 8 ++- .../ansible/inventory/dev/Core/secrets.yml | 11 +++- 37 files changed, 535 insertions(+), 5 deletions(-) create mode 100644 ansible/roles/aws-cli/defaults/main.yml create mode 100644 ansible/roles/aws-cli/tasks/main.yml create mode 100644 ansible/roles/aws-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/delete-folder.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/delete.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/download.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/upload-folder.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/upload.yml diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index cb8230d44b..043446554d 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -26,3 +26,16 @@ dest_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" + + - name: download artifact from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + 
s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" + s3_path: "{{ artifacts_container }}/{{ artifact }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" + aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 52e67448c7..32e866808c 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -27,3 +27,16 @@ dest_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" + + - name: upload artifact to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" + s3_path: "{{ artifacts_container }}/{{ artifact }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" + aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 3809c63722..12021680fe 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -35,6 +35,30 @@ local_file_or_folder_path: "{{ assets }}" when: cloud_service_provider == "azure" +##### AWS + - name: this block consists of tasks related to aws storage + block: + - name: set common aws variables + set_fact: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + s3_path: "{{ player_cdn_storage }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + + - name: delete files and folders from s3 + include_role: + name: aws-cloud-storage + tasks_from: delete-folder.yml + + - name: upload batch of files to s3 + 
include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + local_file_or_folder_path: "{{ assets }}" + when: cloud_service_provider == "aws" + #GCP - name: this block consists of tasks related to azure storage block: diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 36d9d7b0d0..b23479e833 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -40,6 +40,16 @@ tags: - gcloud_cli +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: + - role: aws-cli + tags: + - aws_cli + - hosts: "{{ hosts| default('all') }}" become: yes gather_facts: no diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index bf876b3f66..fa4156d3c7 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -133,3 +133,67 @@ - preview when: cloud_service_provider == "gcloud" +################################### AWS tasks ######################### + - name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - always + + - block: + - name: delete files and folders from s3 + include_role: + name: aws-cloud-storage + tasks_from: delete-folder.yml + vars: + s3_path: "{{ plugin_storage }}/{{ folder_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload folder to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ plugin_storage }}/{{ folder_name }}" + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to s3 + include_role: + name: 
aws-cloud-storage + tasks_from: upload.yml + vars: + s3_path: "{{ plugin_storage }}/artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + + - block: + - name: run the s3_copy.sh script + shell: "bash {{ s3_file_path }} {{ plugin_storage }} {{ source_file }} {{ aws_public_s3_bucket_name }}" + async: 3600 + poll: 10 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + tags: + - plugins + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index f20f0d7eeb..911153576b 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -93,3 +93,54 @@ - upload-chatbot-config - upload-batch when: cloud_service_provider == "gcloud" + +######################## AWS tasks ######################################### + + - name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + s3_path: "{{ upload_storage }}/{{ destination_path }}" + tags: + - always + + - block: + - name: upload file to aws s3 public bucket + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to aws s3 private bucket + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_private_s3_bucket_name }}" + aws_access_key_id: "{{ aws_private_bucket_access_key }}" + aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + 
tags: + - upload-label + + - block: + - name: upload folder to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "aws" + \ No newline at end of file diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index a93a900263..f046e63462 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -38,6 +38,19 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "dial_schema_template_files" + s3_path: "{{ dial_plugin_storage }}/schemas/local" + when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage include_role: diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index a4f6bda83a..aecdab077a 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -23,6 +23,19 @@ storage_account_key: "{{ azure_public_storage_account_key }}" when: cloud_service_provider == "azure" + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + 
aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ source_name }}" + s3_path: "{{ plugin_storage }}/schemas/local" + when: cloud_service_provider == "aws" + - name: upload batch of files to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/aws-cli/defaults/main.yml b/ansible/roles/aws-cli/defaults/main.yml new file mode 100644 index 0000000000..53d866eafa --- /dev/null +++ b/ansible/roles/aws-cli/defaults/main.yml @@ -0,0 +1 @@ +aws_cli_url: https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip \ No newline at end of file diff --git a/ansible/roles/aws-cli/tasks/main.yml b/ansible/roles/aws-cli/tasks/main.yml new file mode 100644 index 0000000000..5907fb1aaf --- /dev/null +++ b/ansible/roles/aws-cli/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Download the installation file + get_url: + url: "{{ aws_cli_url }}" + dest: /tmp/awscliv2.zip + +- name: Installing unzip + apt: + name: "{{item}}" + state: latest + with_items: + - zip + - unzip + +- name: Unzip the installer + unarchive: + src: /tmp/awscliv2.zip + dest: /tmp/ + remote_src: yes + +- name: install aws cli + shell: ./aws/install + args: + chdir: /tmp/ diff --git a/ansible/roles/aws-cloud-storage/defaults/main.yml b/ansible/roles/aws-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..6f3f6f86d6 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/defaults/main.yml @@ -0,0 +1,3 @@ +s3_bucket_name: "" +s3_path: "" +local_file_or_folder_path: "" diff --git a/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml b/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml new file mode 100644 index 0000000000..c912b14edb --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml @@ -0,0 +1,9 @@ +--- +- name: delete files and folders recursively + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 
rm s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/delete.yml b/ansible/roles/aws-cloud-storage/tasks/delete.yml new file mode 100644 index 0000000000..414ea52e6b --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/delete.yml @@ -0,0 +1,9 @@ +--- +- name: delete files from s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 rm s3://{{ s3_bucket_name }}/{{ s3_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/download.yml b/ansible/roles/aws-cloud-storage/tasks/download.yml new file mode 100644 index 0000000000..138024af78 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/download.yml @@ -0,0 +1,9 @@ +--- +- name: download files to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp s3://{{ s3_bucket_name }}/{{ s3_path }} {{ local_file_or_folder_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/main.yml b/ansible/roles/aws-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..62f204a9d2 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: delete files from aws S3 bucket + include: delete.yml + +- name: delete folders from aws S3 bucket recursively + include: delete-folder.yml + + +- name: download file from S3 + include: download.yml + +- name: upload files from a local to aws S3 + include: upload.yml + +- name: upload files and folder from local directory to aws S3 + include: upload-folder.yml + + diff --git a/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml b/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml new file mode 100644 index 0000000000..3e03b068b7 --- 
/dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml @@ -0,0 +1,9 @@ +--- +- name: upload folder to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/upload.yml b/ansible/roles/aws-cloud-storage/tasks/upload.yml new file mode 100644 index 0000000000..af8de990e2 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/upload.yml @@ -0,0 +1,9 @@ +--- +- name: upload files to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index fc662bcea5..507aeb190b 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -45,6 +45,19 @@ storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" +- name: upload backup to S3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + s3_path: "{{ cassandra_backup_storage }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git 
a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 3b2fc3ae9b..8a47ab7089 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -18,6 +18,20 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + become: true + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + s3_path: "{{ cassandra_backup_storage }}/{{ cassandra_restore_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index acecc4d6f4..78f1f769b3 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -44,6 +44,19 @@ storage_account_key: "{{ azure_private_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_private_s3_bucket_name }}" + aws_access_key_id: "{{ aws_private_bucket_access_key }}" + aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + s3_path: "{{ cert_service_storage }}" + when: cloud_service_provider == "aws" + - name: upload batch of files to gcloud storage include_role: name: gcp-cloud-storage diff --git 
a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 4ce4da3fb6..09c41300ef 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -75,6 +75,34 @@ local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "azure" +######################## AWS tasks ################################## + +- name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ offline_installer_storage }}" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ offline_installer_storage }}/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "aws" + - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 0f0a44a2b2..2c8520030c 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -32,6 +32,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + 
aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + s3_path: "{{ grafana_backup_storage }}/{{ grafana_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 32be77b7a7..a94e57fe4a 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -25,6 +25,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + s3_path: "{{ jenkins_backup_storage }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 4ae40ecd2b..0762f2754f 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -27,6 +27,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ 
aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + s3_path: "{{ mongo_backup_storage }}/{{ mongo_backup_file_name }}.tar.gz" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index 686f4c42f6..ea206146b3 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -54,6 +54,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}.zip" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 7df51e26b4..0299ff3f73 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -21,6 +21,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: 
+ s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" + s3_path: "{{ postgres_backup_storage }}/{{ postgres_backup_filename }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 0704d4847f..65116bede0 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -26,6 +26,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index ec6a40494d..877e178987 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -16,6 +16,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + 
s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + s3_path: "{{ postgres_backup_storage }}/{{ postgresql_restore_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 0cafacb627..3831080dbc 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -29,6 +29,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 32cffa6e5c..55a51287ae 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -38,6 +38,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + 
tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 843ebe4598..2232770fdd 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -14,6 +14,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_filename }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 51f7ab63ff..5359a362c8 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -26,6 +26,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: 
upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + s3_path: "{{ nodebb_redis_backup_storage }}/{{ redis_backup_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 52923e1bf4..b37398b874 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -29,6 +29,21 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + s3_path: "{{ upload_storage }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage include_role: diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index 20d7006b52..b95bca2645 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index fea1e80819..e1baf9ca14 100644 --- 
a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -35,7 +35,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 286c957102..1984bcd2b3 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -34,7 +34,11 @@ azure_management_storage_account_name: "{{ sunbird_management_storage_account_na azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud -aws_management_bucket_name: "" +aws_region: "" +aws_management_s3_bucket_name: "" +aws_artifact_s3_bucket_name: "" +aws_public_s3_bucket_name: "" +aws_private_s3_bucket_name: "" # Define the below if you are using Google Cloud gcloud_private_bucket_name: "" @@ -48,7 +52,7 @@ gcloud_private_bucket_projectId: "" # GCP # cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} # AWS -# cloud_storage_url: # Geetha to fill this url based on AWS role vars +# cloud_storage_url: "https://{{aws_public_s3_bucket_name}}.s3.{{aws_region}}.amazonaws.com" # Azure cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" diff --git 
a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index bbb1a526b1..e8e48bf801 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -30,8 +30,15 @@ azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud -aws_management_bucket_user_access_key: "" -aws_management_bucket_user_secret_key: "" +aws_management_bucket_access_key: "" +aws_artifact_bucket_access_key: "" +aws_public_bucket_access_key: "" +aws_private_bucket_access_key: "" + +aws_management_bucket_secret_access_key: "" +aws_artifact_bucket_secret_access_key: "" +aws_public_bucket_secret_access_key: "" +aws_private_bucket_secret_access_key: "" # Define the below if you are using Google Cloud gcp_storage_service_account_name: "" From ab8a0b1f1e2e0e7d5f27c8eff9b038f73213f230 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:06:12 +0530 Subject: [PATCH 079/203] fix: remove polling to save time --- ansible/deploy-plugins.yml | 19 +++++++------------ .../azure-cloud-storage/defaults/main.yml | 2 +- .../tasks/blob-delete-batch-no-poll.yml | 5 +++++ .../tasks/blob-delete-batch.yml | 4 ++-- .../tasks/blob-upload-batch-no-poll.yml | 5 +++++ .../tasks/blob-upload-batch.yml | 6 +++--- pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- 7 files changed, 24 insertions(+), 19 deletions(-) create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 357baef98e..508e32e1de 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -76,20 +76,15 @@ - name: delete batch of files from azure storage include_role: name: 
azure-cloud-storage - tasks_from: blob-delete-batch.yml + tasks_from: "{{ item[0] }}" vars: - blob_delete_pattern: "content-plugins/{{ item }}/*" - with_lines: cat {{ plugin_list_to_delete_and_upload }} - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "/content-plugins/{{ item }}" - local_file_or_folder_path: "{{ source_folder }}/{{ item }}" + blob_delete_pattern: "content-plugins/{{ item[1] }}/*" + blob_container_folder_path: "/content-plugins/{{ item[1] }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" create_container: false - with_lines: cat {{ plugin_list_to_delete_and_upload }} + with_nested: + - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" tags: - plugins when: cloud_service_provider == "azure" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 8f6673d3c9..0f4b72d96d 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -69,4 +69,4 @@ container_public_access: "" # Create the container by default before running the specific azure tasks # If we would like to skip container creation (in case of a looped execution), you can set this value to false # in order to skip the task for every iteration -create_container: true \ No newline at end of file +create_container: True diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml new file mode 100644 index 0000000000..152e3a49ad --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} + shell: "az 
storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index e642a6f24f..152e3a49ad 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- - name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 3600 - poll: 10 \ No newline at end of file + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml new file mode 100644 index 0000000000..ff00854851 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} + shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 8f10576cb5..59d1098fc6 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,9 +3,9 @@ 
include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is true + when: create_container is True - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 3600 - poll: 10 \ No newline at end of file + async: 1800 + poll: 0 \ No newline at end of file diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index 078069bbe0..865d71a34c 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -34,7 +34,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugins_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 949f1da2fa3bcc996c80d5b314f1ae6891c03df6 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:18:41 +0530 Subject: [PATCH 080/203] fix: updated comments Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 1 - ansible/roles/azure-cloud-storage/defaults/main.yml | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 
508e32e1de..2fe881dc93 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -81,7 +81,6 @@ blob_delete_pattern: "content-plugins/{{ item[1] }}/*" blob_container_folder_path: "/content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" - create_container: false with_nested: - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 0f4b72d96d..824d5af18a 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -66,7 +66,7 @@ blob_container_folder_path: "" # You will need to change the access level from Azure portal or using az storage container set-permission command container_public_access: "" -# Create the container by default before running the specific azure tasks -# If we would like to skip container creation (in case of a looped execution), you can set this value to false -# in order to skip the task for every iteration +# Creates the container by default before running the specific azure blob tasks +# If we would like to skip container creation (in case of a looped execution), +# you can set this value to False in order to skip the contatiner creation task for every iteration create_container: True From 931cd0509ba6c68f303f2c0c3eb9b622fdb78206 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:20:09 +0530 Subject: [PATCH 081/203] fix: added a var to control container creation Signed-off-by: Keshav Prasad --- ansible/roles/azure-cloud-storage/defaults/main.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 1 + ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml 
b/ansible/roles/azure-cloud-storage/defaults/main.yml index 824d5af18a..b5266cb76a 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -67,6 +67,6 @@ blob_container_folder_path: "" container_public_access: "" # Creates the container by default before running the specific azure blob tasks -# If we would like to skip container creation (in case of a looped execution), +# If you would like to skip container creation (in case of a looped execution), # you can set this value to False in order to skip the contatiner creation task for every iteration create_container: True diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index 4b493ffb73..36423dcfc6 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,6 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is True - name: upload file to azure storage container shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index 99ab3c2bf8..0726b48f00 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,6 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is True - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path 
}}{{ storage_account_sas_token }}' --recursive" From 2d9eb95c6af1ec44b7d9869f7196c91d3375a2c6 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Thu, 17 Nov 2022 10:18:16 +0530 Subject: [PATCH 082/203] env changes for project certificate story (#3599) --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 8 +------- .../roles/stack-sunbird/templates/ml-projects-service.env | 3 +++ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 7b1da9c931..e3db337464 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -81,10 +81,4 @@ ELASTICSEARCH_ENTITIES_INDEX={{ml_core_elasticsearch_user_extension_index_type | USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:9000")}} ## portal url of env -APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} - -# Project certificate enable or disable flag E.g. ON/OFF -PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} - -# certificate issuer KID value -CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} \ No newline at end of file +APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/ml-projects-service.env b/ansible/roles/stack-sunbird/templates/ml-projects-service.env index eeca6c4d70..16e8340a1c 100644 --- a/ansible/roles/stack-sunbird/templates/ml-projects-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-projects-service.env @@ -39,3 +39,6 @@ PROJECT_SUBMISSION_TOPIC={{ml_project_submission_topic | default (env_name+".ml. 
# Base url of the sunbird enviornment USER_SERVICE_URL={{ml_project_user_service_URL | default("http://learner-service:9000")}} + +# certificate issuer KID value +CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} From 4b41e72e1da9645cb70d7893355141f7210bb3ad Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 15:21:23 +0530 Subject: [PATCH 083/203] fix: remove unnecessary become Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 0d05b069eb..63dba9cd96 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -1,5 +1,4 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" @@ -201,4 +200,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" From 25c6f71fad5ba01e1c2374cc9a23b7f53fdf77ac Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 17:14:10 +0530 Subject: [PATCH 084/203] fix: poll incorrectly set to 0 Signed-off-by: Keshav Prasad --- ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml | 4 ++-- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index 152e3a49ad..e642a6f24f 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- - name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} 
--account-key {{ storage_account_key }}" - async: 1800 - poll: 0 \ No newline at end of file + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 59d1098fc6..82f806a803 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -7,5 +7,5 @@ - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 1800 - poll: 0 \ No newline at end of file + async: 3600 + poll: 10 \ No newline at end of file From 3dc90f9eb9ff25c8b919b81d806c55ec328307a5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 19:24:20 +0530 Subject: [PATCH 085/203] fix: renamed task (#3601) --- ansible/deploy-plugins.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 63dba9cd96..8da2bd445e 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,7 +72,7 @@ - preview - block: - - name: delete batch of files from azure storage + - name: delete and re-upload plugins include_role: name: azure-cloud-storage tasks_from: "{{ item[0] }}" @@ -200,4 +200,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" + when: cloud_service_provider == "aws" \ No newline at end of file From bdbea1be4ef6c30d61ab1406669a5a3cd0573aef Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Mon, 21 Nov 2022 15:47:09 +0530 Subject: [PATCH 086/203] Certificate env changes- RC- internal call used 
instead env variable (#3603) From daf4b7c37236bae6f813ccf26acaa58a80662bc4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:27:44 +0530 Subject: [PATCH 087/203] Update config.j2 --- .../ml-analytics-service/templates/config.j2 | 144 ++++++++++-------- 1 file changed, 83 insertions(+), 61 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index cef1739e4e..f43f08191a 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -4,7 +4,8 @@ survey_app = {{ ml_analytics_survey_app_name }} integrated_app = {{ ml_analytics_integrated_app_name }} -integrated_portal = {{ ml_analytics_integrated_portal }} +integrated_portal = {{ ml_analytics_integrated_portal_name }} + [API_HEADERS] @@ -14,6 +15,7 @@ authorization = {{ ml_analytics_api_authorization_key }} internal_access_token = {{ ml_analytics_api_access_token }} + [ML_SURVEY_SERVICE_URL] url = {{ ml_analytics_survey_service }} @@ -24,24 +26,15 @@ user_profile_end_point = assessment/api/v1/userExtension/getProfile/ evidence_base_url = {{ ml_analytics_evidence_base_url }} -[MONGO] - -# -------------- -# Mongo url -#--------------- -mongo_url = mongodb://{{ ml_analytics_mongodb_url }} +[MONGO] -# ----------------------- -# Mongo database name -# ----------------------- +url = mongodb://{{ ml_analytics_mongodb_url }} database_name = {{ ml_analytics_mongo_db_name }} -# ------------------- -# Mongo Collections -# ------------------- +# ------ Mongo Collections ------- # observation_sub_collection = {{ ml_analytics_mongo_observation_submission_collection }} solutions_collection = {{ ml_analytics_mongo_solution_collection }} @@ -68,6 +61,7 @@ survey_submissions_collection = {{ ml_analytics_mongo_survey_submissions_collect survey_collection = {{ ml_analytics_mongo_survey_collection }} + [DRUID] metadata_url = http://{{ ml_analytics_druid_url 
}}/druid/coordinator/v1/datasources/ @@ -84,13 +78,15 @@ observation_status_injestion_spec = {{ ml_analytics_druid_observation_status_inj project_injestion_spec = {{ ml_analytics_druid_project_injestion_spec }} -ml_distinctCnt_obs_status_spec = {{ ml_analytics_druid_distinctCnt_obs_injestion_spec }} +ml_distinctCnt_obs_status_spec = {{ ml_analytics_druid_distinctCnt_obs_injestion_spec }} -ml_distinctCnt_obs_domain_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_injestion_spec }} +ml_distinctCnt_obs_domain_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_injestion_spec }} -ml_distinctCnt_obs_domain_criteria_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec }} +ml_distinctCnt_obs_domain_criteria_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec }} -ml_distinctCnt_projects_status_spec = {{ ml_analytics_druid_distinctCnt_projects_status_injestion_spec }} +ml_distinctCnt_projects_status_spec = {{ ml_analytics_druid_distinctCnt_projects_status_injestion_spec }} + +ml_distinctCnt_prglevel_projects_status_spec = {{ ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec }} observation_status_rollup_injestion_spec = {{ ml_analytics_druid_observation_status_rollup_injestion_spec }} @@ -98,8 +94,6 @@ project_rollup_injestion_spec = {{ ml_analytics_druid_project_rollup_injestion_s ml_survey_rollup_spec = {{ml_analytics_druid_survey_rollup_injestion_spec}} -ml_distinctCnt_prglevel_projects_status_spec = {{ ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec}} - survey_status_injestion_spec = {{ ml_analytics_druid_survey_status_injestion_spec }} observation_query_spec = {{ ml_analytics_druid_observation_query_spec }} @@ -110,6 +104,9 @@ survey_query_spec = {{ml_analytics_druid_survey_query_spec}} survey_injestion_spec = {{ml_analytics_druid_survey_batch_ingestion_spec}} +intervals = {{ml_analytics_druid_interval_list}} + + [KAFKA] url = {{ ml_analytics_kafka_url }} @@ -126,6 +123,7 @@ 
survey_raw_topic = {{ ml_analytics_kafka_survey_topic_name }} survey_druid_topic = {{ ml_analytics_kafka_survey_druid_topic_name }} + [LOGS] observation_streaming_success = {{ ml_analytics_observation_log_folder_path }}/success.log @@ -152,55 +150,54 @@ survey_streaming_success = {{ ml_analytics_survey_log_folder_path }}/success.log survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log -[AZURE] +{% if ML_Cloud_Service_Provider is eq 'ORACLE' %} -account_name = {{ ml_analytics_azure_account_name }} +[ORACLE] -sas_token = {{ ml_analytics_azure_sas_token }} +endpoint_url = {{ ml_ORACLE_endpoint_url }} -container_name = {{ ml_analytics_azure_container_name }} +access_key = {{ ml_ORACLE_access_key }} -observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} +secret_access_key = {{ ml_ORACLE_secret_access_key }} -projects_blob_path = {{ ml_analytics_project_azure_blob_path }} +region_name = {{ ml_ORACLE_region_name }} -observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} +bucket_name = {{ ml_ORACLE_bucket_name }} -observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} +{% elif ML_Cloud_Service_Provider is eq 'GCP' %} -observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} +[GCP] -projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} +secret_data = {{ ml_GCP_secret_json_file }} -projects_rollup_blob_path = {{ ml_analytics_project_rollup_azure_blob_path }} +bucket_name = {{ ml_GCP_bucket_name }} -observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} +{% elif ML_Cloud_Service_Provider is eq 'AWS' %} -survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} +[AWS] -projects_distinctCnt_prgmlevel_blob_path = {{ml_analytics_projects_distinctCnt_prglevel_azure_blob_path}} +service_name = {{ ml_AWS_service_name 
}} -survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} +access_key = {{ ml_AWS_access_key }} -public_account_name = {{ ml_analytics_public_azure_account_name }} +secret_access_key = {{ ml_AWS_secret_access_key }} -public_access_key = {{ ml_analytics_public_azure_access_key }} +region_name = {{ ml_AWS_region_name }} -public_container_name = {{ ml_analytics_public_azure_container_name }} +bucket_name = {{ ml_AWS_bucket_name }} -projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} +{% else %} -observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +[AZURE] -survey_batch_ingestion_data_del = {{ml_analytics_survey_batchupdate_azure_blob_path}} +account_name = {{ ml_analytics_azure_account_name }} -[REDIS] +sas_token = {{ ml_analytics_azure_sas_token }} -host = {{ ml_analytics_redis_host }} +container_name = {{ ml_analytics_azure_container_name }} -port = {{ ml_analytics_redis_port }} +account_key = {{ ml_analytics_azure_account_key }} -db_name = {{ ml_analytics_redis_db_name }} [OUTPUT_DIR] @@ -208,22 +205,22 @@ project = {{ ml_analytics_project_output_dir }} observation_status = {{ ml_analytics_observation_status_output_dir }} -observation_distinctCount_status = {{ ml_analytics_obs_distinctCnt_output_dir }} +observation_distinctCount_status = {{ ml_analytics_obs_distinctCnt_output_dir }} -observation_distinctCount_domain = {{ ml_analytics_obs_distinctCnt_domain_output_dir }} +observation_distinctCount_domain = {{ ml_analytics_obs_distinctCnt_domain_output_dir }} -observation_distinctCount_domain_criteria = {{ ml_analytics_obs_distinctCnt_domain_criteria_output_dir }} +observation_distinctCount_domain_criteria = {{ ml_analytics_obs_distinctCnt_domain_criteria_output_dir }} projects_distinctCount = {{ ml_analytics_projects_distinctCnt_output_dir }} +projects_distinctCount_prgmlevel = {{ ml_analytics_projects_distinctCnt_prglevel_output_dir }} + project_rollup = {{ 
ml_analytics_project_rollup_output_dir }} observation_status_rollup = {{ ml_analytics_observation_status_rollup_output_dir }} survey_rollup = {{ ml_analytics_survey_rollup_output_dir }} -projects_distinctCount_prgmlevel = {{ml_analytics_projects_distinctCnt_prglevel_output_dir}} - survey_status = {{ ml_analytics_survey_status_output_dir }} observation_sub_ids = {{ ml_analytics_observation_submission_id_filepath }} @@ -234,26 +231,51 @@ survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} -[CLOUD_STORAGE] -service_name = {{ ml_analytics_AWS_service_name }} +[SLACK] -access_key = {{ ml_analytics_AWS_access_key }} +token = {{ml_slack_token}} -secret_access_key = {{ ml_analytics_AWS_secret_access_key }} +channel = {{ml_slack_channel}} -region_name = {{ ml_analytics_AWS_region_name }} -bucket_name = {{ ml_analytics_AWS_bucket_name }} +[VAM] -[SLACK] +druid_query_url = {{ ml_druid_query_data }} -token = {{ ml_analytics_slack_token }} +program_dashboard_data = {{ ml_program_dashboard_data }} -channel = {{ ml_analytics_channel_name }} -[VAM] +[COMMON] -druid_query_url = {{ ml_druid_query_data }} +cloud_module_path = {{ ml_analytics_cloud_package_path }} -program_dashboard_data = {{ ml_program_dashboard_data }} +observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} + +projects_blob_path = {{ ml_analytics_project_azure_blob_path }} + +observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} + +observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} + +observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} + +projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} + +projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_azure_blob_path }} + +projects_rollup_blob_path = {{ 
ml_analytics_project_rollup_azure_blob_path }} + +observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} + +survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} + +survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} + +projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} + +observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} + +survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_azure_blob_path}} + +cname_url = {{ ml_analytics_cname_url }} From 6bc03f69e454aea55aeeb8b74e5003b403d4376c Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:35:57 +0530 Subject: [PATCH 088/203] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index 0998cf8188..6bf640005e 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -105,6 +105,10 @@ dest: "{{ config_path }}/config.ini" backup: yes +- name: Copy GCP Secrets to JSON file + copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0644 owner="{{ USER }}" + when: ML_Cloud_Service_Provider == 'GCP' + - name: Templating the shell_script_config.j2 to shell_script_config template: src: "shell_script_config.j2" From 38e6fd566b7f2c968c7341088e453d1bd23b7fb2 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:37:28 +0530 Subject: [PATCH 089/203] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index f43f08191a..ce5ff99bff 100644 --- 
a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -4,7 +4,7 @@ survey_app = {{ ml_analytics_survey_app_name }} integrated_app = {{ ml_analytics_integrated_app_name }} -integrated_portal = {{ ml_analytics_integrated_portal_name }} +integrated_portal = {{ ml_analytics_integrated_portal }} [API_HEADERS] From eb8eb6a37af4aa6935c82488e4b678c930742467 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:56:34 +0530 Subject: [PATCH 090/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 1e6f116cc3..6733061380 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -110,3 +110,8 @@ ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioCon ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" +ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' +ml_analytics_azure_account_key: "{{ sunbird_private_storage_account_sas }}" +ML_Cloud_Service_Provider: "{{ ml_csp | default('AZURE') }}" ## Valid options - ORACLE, GCP, AWS & AZURE 
+ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" +ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 7c02eb4653bd23890c3b0fedcd5b77342d29bde7 Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 23 Nov 2022 18:39:44 +0530 Subject: [PATCH 091/203] Issue #KN-427 feat: Knowledge service cloud-agnostic --- .../templates/assessment-service_application.conf | 9 +++++---- .../templates/content-service_application.conf | 9 +++++---- .../templates/taxonomy-service_application.conf | 9 +++++---- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf index 851433f160..60d129907a 100644 --- a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf @@ -383,10 +383,11 @@ languageCode { telugu : "te" } -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" kafka { urls : "{{ kafka_urls }}" diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index ee0a80a645..24c1007695 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -481,10 +481,11 @@ composite { url : "{{ sunbird_search_service_api_base_url 
}}/v3/search" } } -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" diff --git a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf index 1c3714fbe8..e1298a1b92 100644 --- a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf @@ -366,10 +366,11 @@ platform { } # Cloud Storage Config -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" installation.id: ekstep From f804103408530aa2d9c2de3b6ad43c08aecd97bc Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 23 Nov 2022 18:43:07 +0530 Subject: [PATCH 092/203] Revert "Issue #KN-9 feat: Content Publish API refactor." This reverts commit e37ca7291abf51ec385d9c464a3852f32b5724f1. 
--- .../stack-sunbird/templates/content-service_application.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index 24c1007695..fb5a2e7667 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -494,7 +494,6 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" - publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config From 267f338854cd4f8aacb4f957dbeb98537fea55a0 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:35:50 +0530 Subject: [PATCH 093/203] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index ce5ff99bff..770de394cb 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -164,7 +164,7 @@ region_name = {{ ml_ORACLE_region_name }} bucket_name = {{ ml_ORACLE_bucket_name }} -{% elif ML_Cloud_Service_Provider is eq 'GCP' %} +{% elif ML_Cloud_Service_Provider is eq 'gcloud' %} [GCP] @@ -172,7 +172,7 @@ secret_data = {{ ml_GCP_secret_json_file }} bucket_name = {{ ml_GCP_bucket_name }} -{% elif ML_Cloud_Service_Provider is eq 'AWS' %} +{% elif ML_Cloud_Service_Provider is eq 'aws' %} [AWS] From d22223a0a4cfc6103d333ffc8069a162abdd6e4c Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:53:35 +0530 Subject: [PATCH 094/203] Update main.yml --- .../ml-analytics-service/defaults/main.yml | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git 
a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6733061380..da0df0ada6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,8 +44,8 @@ ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_azure_account_name: "{{ sunbird_private_storage_account_name }}" -ml_analytics_azure_container_name: "telemetry-data-store" +ml_analytics_azure_account_name: "{{ cloud_private_storage_accountname }}" +ml_analytics_azure_container_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_observation_azure_blob_path: "observation/status/" ml_analytics_project_azure_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" @@ -57,7 +57,7 @@ ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": 
["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' 
-ml_analytics_azure_sas_token: "{{ sunbird_private_storage_account_key }}" +ml_analytics_azure_sas_token: "{{ cloud_private_storage_secret }}" ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' @@ -80,11 +80,10 @@ ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observ ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_AWS_service_name: "{{ ml_AWS_service_name | default('') }}" -ml_analytics_AWS_access_key: "{{ ml_AWS_access_key | default('') }}" -ml_analytics_AWS_secret_access_key: "{{ ml_AWS_secret_access_key | default('') }}" -ml_analytics_AWS_region_name: "{{ ml_AWS_region_name | default('') }}" -ml_analytics_AWS_bucket_name: "{{ ml_AWS_bucket_name | default('') }}" +ml_analytics_AWS_access_key: "{{ 
cloud_private_storage_accountname }}" +ml_analytics_AWS_secret_access_key: "{{ cloud_private_storage_secret }}" +ml_analytics_AWS_region_name: "{{ cloud_private_storage_region }}" +ml_analytics_AWS_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" ml_analytics_projects_distinctCnt_prglevel_azure_blob_path: "projects/distinctCountPrglevel/" @@ -93,9 +92,6 @@ ml_analytics_survey_azure_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" -ml_analytics_public_azure_account_name: "{{ ml_public_azure_account | default('') }}" -ml_analytics_public_azure_access_key: "{{ ml_public_azure_key | default('') }}" -ml_analytics_public_azure_container_name: "{{ ml_public_azure_container | default('') }}" ml_analytics_program_dashboard_azure_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" @@ -111,7 +107,7 @@ ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' -ml_analytics_azure_account_key: "{{ sunbird_private_storage_account_sas }}" -ML_Cloud_Service_Provider: "{{ ml_csp | default('AZURE') }}" ## Valid options - ORACLE, GCP, AWS & AZURE +ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" +ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, GCP, AWS & AZURE ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 69249369d7031e55ccb3737c4352f95e16d652df Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:55:16 +0530 Subject: [PATCH 095/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index da0df0ada6..6b12734b74 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -108,6 +108,6 @@ ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" -ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, GCP, AWS & AZURE +ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From bcdb073b4716ccfbcd1bf89d0acec8ecbe02f6ea Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 12:14:58 +0530 Subject: [PATCH 096/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6b12734b74..b9e9557807 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -109,5 +109,5 @@ ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure -ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" +ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 1932c838f41f48de229dbf2a1e2d8af45ffb37b4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 12:24:35 +0530 Subject: [PATCH 097/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index b9e9557807..bdc0129745 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -110,4 +110,4 @@ ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00: ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" -ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" +ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" 
From d13d58a0797b9377086e379b2d907386d7a917de Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 13:06:20 +0530 Subject: [PATCH 098/203] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index 6bf640005e..a3813c5d05 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -106,7 +106,7 @@ backup: yes - name: Copy GCP Secrets to JSON file - copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0644 owner="{{ USER }}" + copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" when: ML_Cloud_Service_Provider == 'GCP' - name: Templating the shell_script_config.j2 to shell_script_config From dfd09a62adca833c9f37ef56fb45a8aad4667f68 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 14:34:33 +0530 Subject: [PATCH 099/203] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index a3813c5d05..dfa015c99c 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -107,7 +107,7 @@ - name: Copy GCP Secrets to JSON file copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" - when: ML_Cloud_Service_Provider == 'GCP' + when: ML_Cloud_Service_Provider == 'gcloud' - name: Templating the shell_script_config.j2 to shell_script_config template: From 99e820eb7d2d79651f8a40ab7a5de72f34486c68 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 16:11:29 
+0530 Subject: [PATCH 100/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index bdc0129745..e29058043c 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -111,3 +111,7 @@ ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" +ml_GCP_secret_json_file: gcp_secrets.json +ml_GCP_Secrets: + account_name: {{ cloud_private_storage_accountname }} + account_key: {{ cloud_private_storage_secret }} From 618045ed8f33d036b41010ff520c88383b1d19eb Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 16:14:41 +0530 Subject: [PATCH 101/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index e29058043c..fd73a69673 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -112,6 +112,7 @@ ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" ml_GCP_secret_json_file: gcp_secrets.json +ml_GCP_bucket_name: {{ cloud_storage_telemetry_bucketname }} ml_GCP_Secrets: account_name: {{ 
cloud_private_storage_accountname }} account_key: {{ cloud_private_storage_secret }} From abab73d35530175638ea31e7d79781a450ee2af3 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 14:43:50 +0530 Subject: [PATCH 102/203] Issue #ED-521 fix: Updated CSP variables with Finalized vars --- .../roles/stack-sunbird/templates/sunbird_player.env | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 2afd3af54c..c0ec466694 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -222,3 +222,14 @@ sunbird_gcloud_labels={{gcloud_labels_folder_name | default("labels")}} sunbird_gcloud_client_email={{gcloud_private_bucket_client_email | default("")}} sunbird_gcloud_private_key={{gcloud_private_bucket_private_key | default("")}} sunbird_gcloud_projectId={{gcloud_private_bucket_projectId | default("")}} + +#release-5.1.0 +#CSP configuration variables changes +cloud_service_provider={{cloud_service_provider}} +cloud_private_storage_accountname={{cloud_private_storage_accountname | default("")}} +cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} +cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} +cloud_private_storage_project={{cloud_private_storage_project | default("")}} +cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_resourceBundle_bucketname | default("label")}} +cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopCrash_bucketname | default("desktopappcrashlogs")}} From c72fa43de1022b894f58abe2681f8cd8b2a7ca93 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 15:07:37 +0530 Subject: [PATCH 103/203] Issue #ED-521 fix: Updated CSP variables with 
Finalized vars --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index c0ec466694..0180418fb0 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -231,5 +231,5 @@ cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} -cloud_storage_resourceBundle_bucketname={{cloud_storage_resourceBundle_bucketname | default("label")}} -cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopCrash_bucketname | default("desktopappcrashlogs")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_resourcebundle_bucketname | default("label")}} +cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From 1a3073db50dac65a0eaf16e66949083ac24cbd2d Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 15:33:09 +0530 Subject: [PATCH 104/203] Issue #ED-521 fix: Updated CSP variables with Finalized vars --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 0180418fb0..9cb6473418 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -231,5 +231,5 @@ cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} 
cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} -cloud_storage_resourceBundle_bucketname={{cloud_storage_resourcebundle_bucketname | default("label")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_label_bucketname | default("label")}} cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From aaa3644c5af698e581878c848bd8f0eed598101f Mon Sep 17 00:00:00 2001 From: Surabhi Date: Fri, 25 Nov 2022 16:14:58 +0530 Subject: [PATCH 105/203] variables changes for sunbird cloud agnostic tool --- ansible/roles/stack-sunbird/templates/inbound.env | 8 ++++---- ansible/roles/stack-sunbird/templates/transformer.env | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index 331ae1d3fc..d8b3bcba55 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -59,10 +59,10 @@ NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipost.com/api/v2/')}} #Sunbird CDN Configuration -SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} -SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | 
default('uci-' + env )}} #Selected CDN Configuration SELECTED_FILE_CDN=sunbird diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index b5be5c4451..f8bcde12dc 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -71,10 +71,10 @@ EXHAUST_TELEMETRY_ENABLED=TRUE POSTHOG_EVENT_ENABLED=FALSE #Sunbird CDN Configuration -SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} -SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration SELECTED_FILE_CDN=sunbird \ No newline at end of file From b26fbbc807c7d20328ea365f07fd97fb7df03786 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Fri, 25 Nov 2022 20:13:43 +0530 Subject: [PATCH 106/203] variables changes - private vars --- ansible/roles/stack-sunbird/templates/inbound.env | 4 ++-- ansible/roles/stack-sunbird/templates/transformer.env | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index d8b3bcba55..1104836d93 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -60,8 +60,8 @@ NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipos #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} 
-SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_private_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_private_storage_secret}} SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index f8bcde12dc..72aa85a7d1 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -72,8 +72,8 @@ POSTHOG_EVENT_ENABLED=FALSE #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_private_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_private_storage_secret}} SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration From 4c14f86a8f3a68429b4309f34958f82932370f90 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Mon, 28 Nov 2022 18:10:39 +0530 Subject: [PATCH 107/203] Make roles as anonymous for project certificate --- .../sunbird-RC/registry/schemas/ProjectCertificate.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 0035464abb..cdfce21ac4 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -64,6 +64,7 @@ ], "ownershipAttributes": [], "roles": [ + "anonymous" ], "inviteRoles": [ "anonymous" 
@@ -72,4 +73,4 @@ "enableLogin": false, "credentialTemplate": "{{ upstream_url }}/schema/project_credential_template.json" } -} \ No newline at end of file +} From 1f4f735d84d93b205313a08e9fa2f25a81007da2 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 29 Nov 2022 10:46:11 +0530 Subject: [PATCH 108/203] Fix syntax (#3621) --- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 4 ++-- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 4 ++-- .../roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 82f806a803..53a57d7398 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,9 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 - poll: 10 \ No newline at end of file + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index 36423dcfc6..d895cf46cf 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,9 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload file to azure storage container shell: 
"az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 - poll: 10 \ No newline at end of file + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index 0726b48f00..e37243cd70 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,11 +3,11 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 - poll: 10 \ No newline at end of file + poll: 10 From a5f853386839da3ab4313b1c635d59668a888d2d Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 29 Nov 2022 12:28:28 +0530 Subject: [PATCH 109/203] Change condition to match boolean (#3623) --- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 53a57d7398..900ecee515 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,7 +3,7 @@ include_role: name: 
azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index d895cf46cf..5430aba8fa 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,7 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload file to azure storage container shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index e37243cd70..affbc8c002 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,7 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" From 20955da02e6434923d51649980b7472bf89ac521 Mon Sep 17 00:00:00 2001 
From: Raghupathi Date: Tue, 29 Nov 2022 13:14:41 +0530 Subject: [PATCH 110/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index fd73a69673..99e7526e4b 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -111,8 +111,8 @@ ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_secret_json_file: gcp_secrets.json -ml_GCP_bucket_name: {{ cloud_storage_telemetry_bucketname }} +ml_GCP_secret_json_file: "gcp_secrets.json" +ml_GCP_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_GCP_Secrets: - account_name: {{ cloud_private_storage_accountname }} - account_key: {{ cloud_private_storage_secret }} + account_name: "{{ cloud_private_storage_accountname }}" + account_key: "{{ cloud_private_storage_secret }}" From d002f1e51fc1aa8bb01c17c857a82b475b9aea3d Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Tue, 29 Nov 2022 13:32:27 +0530 Subject: [PATCH 111/203] updated CACHE_CONTEXT_URLS var (#3625) --- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 62c9114a76..58d931b861 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ 
b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -35,4 +35,4 @@ certificatesignenv: CERTIFICATE_PRIVATE_KEY: |- {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} SIGNING_KEY_TYPE: "{{ SIGNING_KEY_TYPE|default('RSA')}}" - CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,upstream_url + '/schema/sunbird_context.json,upstream_url + '/schema/credential_template.json')}}" \ No newline at end of file + CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,' + upstream_url + '/schema/sunbird_context.json,' + upstream_url + '/schema/credential_template.json') }}" \ No newline at end of file From b8b4fc4546effee1f6acef19ae78cc6b75fb36b8 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Tue, 29 Nov 2022 18:29:06 +0530 Subject: [PATCH 112/203] csp migration variables update --- .../core/analytics/templates/deployment.yaml | 16 ++++++++-------- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 0926360f76..57198cb77b 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -35,14 +35,14 @@ spec: value: {{ .Values.env.min_heap | quote }} - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - - name: azure_storage_secret - value: {{ .Values.env.azure_private_account_secret | quote }} - - name: azure_storage_key - value: {{ .Values.env.azure_private_account_name | quote }} - - name: public_azure_storage_secret - value: {{ .Values.env.azure_public_account_secret | quote }} - - name: public_azure_storage_key - value: {{ .Values.env.azure_public_account_name | quote }} + - name: cloud_storage_secret + value: {{ .Values.env.cloud_private_account_secret | quote }} + - 
name: cloud_storage_key + value: {{ .Values.env.cloud_private_account_name | quote }} + - name: public_cloud_storage_secret + value: {{ .Values.env.cloud_public_account_secret | quote }} + - name: public_cloud_storage_key + value: {{ .Values.env.cloud_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true envFrom: diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..f86925ad5c 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ sunbird_public_storage_account_name }} + cloud_private_account_secret: {{ sunbird_private_storage_account_key }} + cloud_private_account_name: {{ sunbird_private_storage_account_name }} + cloud_public_account_secret: {{ sunbird_public_storage_account_key }} + cloud_public_account_name: {{ sunbird_public_storage_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From daef17772d996e7da7f90b2b1b925b17dec876cf Mon Sep 17 00:00:00 2001 From: saiakhil46 Date: Wed, 30 Nov 2022 17:37:18 +0530 Subject: [PATCH 113/203] added config.json file in certificatesign and updated templates --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 8 ++++++++ .../sunbird-RC/certificatesign/configs/config.json | 11 +++++++++++ .../certificatesign/templates/configmap.yaml | 13 +++++++++++++ .../certificatesign/templates/deployment.yaml | 7 +++++++ 4 files 
changed, 39 insertions(+) create mode 100644 kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index a57c847b97..8f4881089a 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -22,6 +22,14 @@ - "{{ chart_path }}/schemas/*.json" when: release_name == "registry" +- name: template config json + template: + src: "{{ item }}" + dest: "{{ item }}" + with_fileglob: + - "{{ chart_path }}/configs/*.json" + when: release_name == "certificatesign" + - name: Load role to decrypt private keys, copy to private keys helm chart include_role: name: mount-keys diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json new file mode 100644 index 0000000000..0909ace256 --- /dev/null +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json @@ -0,0 +1,11 @@ +{ + "issuers": { + "default": { + "publicKey": {{ CERTIFICATE_PUBLIC_KEY | default("") }}, + "privateKey": {{ CERTIFICATE_PRIVATE_KEY | default("") }}, + "signatureType": "RSA", + "verificationMethod": "did:india", + "$comment": "The above are test keys and it needs to be replaced before going to production" + } + } +} \ No newline at end of file diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml index 05b928173d..587c7e9dfb 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml @@ -9,3 +9,16 @@ metadata: creationTimestamp: null name: {{ .Chart.Name }}-config namespace: {{ .Values.namespace }} + +--- + +{{- $configs := .Files.Glob "configs/*" }} +{{ if $configs }} +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: {{ .Chart.Name }}-conf + namespace: {{ .Values.namespace }} +data: +{{ (.Files.Glob "configs/*").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml index 7fa927020a..c271409ef9 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml @@ -34,6 +34,13 @@ spec: {{ toYaml .Values.resources | indent 10 }} ports: - containerPort: {{ .Values.network.port }} + volumeMounts: + - name: {{ .Chart.Name }}-conf + mountPath: /etc/signer + volumes: + - name: {{ .Chart.Name }}-conf + configMap: + name: {{ .Chart.Name }}-conf --- From 6f9ffe1985f150b2ca095442577fa01ed5572d4e Mon Sep 17 00:00:00 2001 From: saiakhil46 Date: Thu, 1 Dec 2022 11:36:12 +0530 Subject: [PATCH 114/203] updated certificatesign helm_chart --- .../sunbird-RC/certificatesign/configs/config.json | 4 ++-- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json index 0909ace256..98052b982e 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json @@ -1,8 +1,8 @@ { "issuers": { "default": { - "publicKey": {{ CERTIFICATE_PUBLIC_KEY | default("") }}, - "privateKey": {{ CERTIFICATE_PRIVATE_KEY | default("") }}, + "publicKey": "{{ CERTIFICATESIGN_PUBLIC_KEY | default('') }}", + "privateKey": "{{ CERTIFICATESIGN_PRIVATE_KEY | default('') }}", "signatureType": "RSA", "verificationMethod": "did:india", "$comment": "The above are test keys and it needs to be replaced before going to production" 
diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 58d931b861..9e6beb68d4 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -31,8 +31,7 @@ certificatesignenv: PORT: "8079" QR_TYPE: {{ QR_TYPE|default('URL') }} CERTIFICATE_DOMAIN_URL: "https://{{domain_name}}" - CERTIFICATE_PUBLIC_KEY: {{CERTIFICATE_PUBLIC_KEY | default("''")}} - CERTIFICATE_PRIVATE_KEY: |- - {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} + CERTIFICATE_PUBLIC_KEY: "{{ CERTIFICATESIGN_PUBLIC_KEY | default('') }}" + CERTIFICATE_PRIVATE_KEY: "{{ CERTIFICATESIGN_PRIVATE_KEY | default('') }}" SIGNING_KEY_TYPE: "{{ SIGNING_KEY_TYPE|default('RSA')}}" CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,' + upstream_url + '/schema/sunbird_context.json,' + upstream_url + '/schema/credential_template.json') }}" \ No newline at end of file From 53268216c638206567129ea0269af973139898fc Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 1 Dec 2022 14:00:53 +0530 Subject: [PATCH 115/203] fix: adding verbosity for desktop deploy (#3633) --- pipelines/offlineinstaller/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/offlineinstaller/Jenkinsfile b/pipelines/offlineinstaller/Jenkinsfile index c97c01a9bd..a4e6a8f610 100644 --- a/pipelines/offlineinstaller/Jenkinsfile +++ b/pipelines/offlineinstaller/Jenkinsfile @@ -31,7 +31,7 @@ node() { } stage('Install the offline desktop Application') { ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\"" + ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs 
offline_installer_type=${offline_installer_type}\" -v" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 8648a6183dc08554ee3353f3efc1afa908ed8a8b Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Fri, 2 Dec 2022 14:39:37 +0530 Subject: [PATCH 116/203] Set authentication_enabled as false by default Add authentication_enabled as a new env key for the registry service and set it false by default. --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index e7e6e6f31c..09181622b7 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -44,6 +44,7 @@ rccoreenv: connectionInfo_maxPoolSize: {{ registry_connectionInfo_maxPoolSize|default('200')}} auditTaskExecutor_queueCapacity: {{ registry_auditTaskExecutor_queueCapacity|default('100')}} taskExecutor_index_queueCapacity: {{ registry_taskExecutor_index_queueCapacity|default('100')}} + authentication_enabled: {{ registry_authentication_enabled|default('false')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From 4b370d0f9a22cb5152d9dd15cdff7c74e710f584 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:21:04 +0530 Subject: [PATCH 117/203] Release 5.1.0 (#3637) * Update deploy plugins for GCP * Fix looping issue --- ansible/deploy-plugins.yml | 30 +++++++++++++++++-- .../tasks/delete-batch-no-poll.yml | 6 ++++ .../tasks/upload-batch-no-poll.yml | 5 ++++ 3 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml diff --git 
a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 8da2bd445e..6f5460809f 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -86,7 +86,8 @@ tags: - plugins when: cloud_service_provider == "azure" - + +### GCP tasks #### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables @@ -135,6 +136,31 @@ local_file_or_folder_path: "{{ source_file_name }}" tags: - preview + + - block: + - name: Authenticate to gcloud + include_role: + name: gcp-cloud-storage + tasks_from: gcloud-auth.yml + + - name: delete and re-upload plugins + include_role: + name: gcp-cloud-storage + tasks_from: "{{ item[0] }}" + vars: + file_delete_pattern: "content-plugins/{{ item[1] }}/*" + dest_folder_path: "content-plugins/{{ item[1] }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" + with_nested: + - ['delete-batch-no-poll.yml', 'upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" + + - name: Revoke gcloud access + include_role: + name: gcp-cloud-storage + tasks_from: gcloud-revoke.yml + tags: + - plugins when: cloud_service_provider == "gcloud" ################################### AWS tasks ######################### @@ -200,4 +226,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml new file mode 100644 index 0000000000..ca02b8a064 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml @@ -0,0 +1,6 @@ +--- +- name: Delete folder recursively in gcp storage + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }}" + async: 1800 + poll: 0 + diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml 
b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml new file mode 100644 index 0000000000..0d8755ab26 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: Upload files from a local directory gcp storage + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + async: 1800 + poll: 0 From b8bffc23589f6715c26b475a602ca39bf51b481b Mon Sep 17 00:00:00 2001 From: Reshmi V Nair <54312456+reshmi-nair@users.noreply.github.com> Date: Tue, 6 Dec 2022 10:13:47 +0530 Subject: [PATCH 118/203] LR-262 CSP related variable - Cloud storage base path added (#3641) --- ansible/inventory/env/group_vars/all.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 1aaa166d5c..62cac6441d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -132,6 +132,8 @@ cassandra_backup_azure_container_name: cassandra-backup cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### cassandra_multi_dc_enabled: false +# Release-5.0.1 +cloud_storage_base_url: "{{cloud_storage_base_url}}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From 4e4cd89769a48b356e9fcd066854c6e7d2ffce01 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Tue, 6 Dec 2022 11:59:53 +0530 Subject: [PATCH 119/203] Add environment keys to enable async certificate Add environment keys to enable async certificate creation --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index 09181622b7..7668715b20 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -45,6 +45,16 @@ 
rccoreenv: auditTaskExecutor_queueCapacity: {{ registry_auditTaskExecutor_queueCapacity|default('100')}} taskExecutor_index_queueCapacity: {{ registry_taskExecutor_index_queueCapacity|default('100')}} authentication_enabled: {{ registry_authentication_enabled|default('false')}} + async_enabled: {{ registry_async_enabled|default('true')}} + webhook_enabled: {{ registry_webhook_enabled|default('true')}} + ZOOKEEPER_CLIENT_PORT: {{ registry_zookeeper_client_port|default('2181')}} + ZOOKEEPER_TICK_TIME: {{ registry_zookeeper_tick_time|default('2000')}} + KAFKA_BROKER_ID: {{ registry_kafka_broker_id|default('1')}} + KAFKA_ZOOKEEPER_CONNECT: "{{groups['processing-cluster-zookeepers']|join(':2181,')}}:2181" + KAFKA_ADVERTISED_LISTENERS: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} + KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From 5dc030bf3356a7bb06c7a356506fdafe3adc29bd Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 6 Dec 2022 14:25:17 +0530 Subject: [PATCH 120/203] Update config.j2 (#3644) --- .../ml-analytics-service/defaults/main.yml | 44 ++++++------- .../roles/ml-analytics-service/tasks/main.yml | 2 +- .../ml-analytics-service/templates/config.j2 | 61 ++++++++++--------- 3 files changed, 52 insertions(+), 55 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 99e7526e4b..da571d3bed 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,10 +44,8 @@ 
ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_azure_account_name: "{{ cloud_private_storage_accountname }}" -ml_analytics_azure_container_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_observation_azure_blob_path: "observation/status/" -ml_analytics_project_azure_blob_path: "projects/" +ml_analytics_project_cloud_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" ml_analytics_redis_port: "{{ ml_redis_device_port | default('6379') }}" ml_analytics_redis_db_name: "12" @@ -57,62 +55,60 @@ ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": 
["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' 
-ml_analytics_azure_sas_token: "{{ cloud_private_storage_secret }}" ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_obs_distinctCnt_azure_blob_path: "observation/distinctCount/" -ml_analytics_obs_distinctCnt_domain_azure_blob_path: "observation/distinctCount_domain/" -ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path: "observation/distinctCount_domain_criteria/" -ml_analytics_projects_distinctCnt_azure_blob_path: "projects/distinctCount/" +ml_analytics_obs_distinctCnt_cloud_blob_path: "observation/distinctCount/" +ml_analytics_obs_distinctCnt_domain_cloud_blob_path: "observation/distinctCount_domain/" 
+ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path: "observation/distinctCount_domain_criteria/" +ml_analytics_projects_distinctCnt_cloud_blob_path: "projects/distinctCount/" ml_analytics_obs_distinctCnt_output_dir: "{{ WORKDIR }}/source/observations/distinctCount/output" ml_analytics_obs_distinctCnt_domain_output_dir: "{{ WORKDIR }}/source/observations/distinctCount_domain/output" ml_analytics_obs_distinctCnt_domain_criteria_output_dir: "{{ WORKDIR }}/source/observations/distinctCount_domain_criteria/output" ml_analytics_projects_distinctCnt_output_dir: "{{ WORKDIR }}/source/projects/distinctCount/output" -ml_analytics_survey_rollup_azure_blob_path: "survey/rollup/" +ml_analytics_survey_rollup_cloud_blob_path: "survey/rollup/" ml_analytics_druid_survey_rollup_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/rollup/sl_survey_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel",{"type":"long","name":"status_code"}, "solution_name", 
"solution_id"]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_user_id","fieldName":"user_id"},{"type":"HLLSketchBuild","name":"count_of_survey_submission_id","fieldName":"survey_submission_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_survey_rollup_output_dir: "/opt/sparkjobs/source/survey/output" -ml_analytics_project_rollup_azure_blob_path: "projects/rollup" -ml_analytics_observation_rollup_azure_blob_path: "observation/rollup" +ml_analytics_project_rollup_cloud_blob_path: "projects/rollup" +ml_analytics_observation_rollup_cloud_blob_path: "observation/rollup" ml_analytics_project_rollup_output_dir: "/opt/sparkjobs/source/projects/output_rollup" ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observations/output_rollup" ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_AWS_access_key: "{{ cloud_private_storage_accountname }}" -ml_analytics_AWS_secret_access_key: "{{ cloud_private_storage_secret }}" -ml_analytics_AWS_region_name: "{{ cloud_private_storage_region }}" -ml_analytics_AWS_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" -ml_analytics_projects_distinctCnt_prglevel_azure_blob_path: "projects/distinctCountPrglevel/" +ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" -ml_analytics_survey_azure_blob_path : "survey/status/" +ml_analytics_survey_cloud_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" -ml_analytics_program_dashboard_azure_blob_path: "{{ ml_program_blob_path | default('') }}" +ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" ml_analytics_druid_query_url: "{{groups['druid'][0]}}:8082" ml_analytics_druid_observation_query_spec: 
'{"queryType":"scan","dataSource":"sl-observation","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","entityType","entityTypeId","observationId","observationName","observationSubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","school","block","district","cluster","state","schoolName","blockName","districtName","clusterName","stateName","schoolExternalId","blockExternalId","districtExternalId","clusterExternalId","stateExternalId","schoolTypes","administrationTypes","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","role_title","location_validated_with_geotag","distance_in_meters","entity","entityExternalId","entityName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","criteriaLevel","criteriaScore","submissionNumber","submissionTitle","channel","parent_channel","user_districtName","user_blockName","user_clusterName","appName","evidences","user_stateName","domainName","domainExternalId","childName","childType","childExternalid","level","criteriaDescription","programDescription","solutionDescription","label","imp_project_id","imp_project_title","imp_project_goal","imp_project_externalId","ancestorName","scoringSystem","domainLevel","domainScore","criteriaLevelReport","user_schoolName","user_schoolId","user_schoolUDISE_code","solution_type","organisation_name","user_boardName","district_externalId","state_externalId","block_externalId","cluster
_externalId","organisation_id","user_type"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' ml_analytics_druid_observation_batch_ingestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"entityType"},{"type":"string","name":"entityTypeId"},{"type":"string","name":"observationId"},{"type":"string","name":"observationName"},{"type":"string","name":"observationSubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"str
ing","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"school"},{"type":"string","name":"block"},{"type":"string","name":"district"},{"type":"string","name":"cluster"},{"type":"string","name":"state"},{"type":"string","name":"schoolName"},{"type":"string","name":"blockName"},{"type":"string","name":"districtName"},{"type":"string","name":"clusterName"},{"type":"string","name":"stateName"},{"type":"string","name":"schoolExternalId"},{"type":"string","name":"blockExternalId"},{"type":"string","name":"districtExternalId"},{"type":"string","name":"clusterExternalId"},{"type":"string","name":"stateExternalId"},{"type":"string","name":"schoolTypes"},{"type":"string","name":"administrationTypes"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"role_title"},{"type":"string","name":"location_validated_with_geotag"},{"type":"string","name":"distance_in_meters"},{"type":"string","name":"entity"},{"type":"string","name":"entityExternalId"},{"type":"string","name":"entityName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"criteriaLevel"},{"type":"string","name":"criteriaScore"},{"type":"string","name":"submissionNumber"},{"type":"string","name":"submissionTitle"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"user_districtName"},{"type":"string","name":"user_blockName"},{"type":"string","
name":"user_clusterName"},{"type":"string","name":"appName"},{"type":"string","name":"evidences"},{"type":"string","name":"user_stateName"},{"type":"string","name":"domainName"},{"type":"string","name":"domainExternalId"},{"type":"string","name":"childName"},{"type":"string","name":"childType"},{"type":"string","name":"childExternalid"},{"type":"string","name":"level"},{"type":"string","name":"criteriaDescription"},{"type":"string","name":"programDescription"},{"type":"string","name":"solutionDescription"},{"type":"string","name":"label"},{"type":"string","name":"imp_project_id"},{"type":"string","name":"imp_project_title"},{"type":"string","name":"imp_project_goal"},{"type":"string","name":"imp_project_externalId"},{"type":"string","name":"ancestorName"},{"type":"string","name":"scoringSystem"},{"type":"string","name":"domainLevel"},{"type":"string","name":"domainScore"},{"name":"criteriaLevelReport","type":"boolean"},{"type":"string","name":"user_schoolName"},{"type":"string","name":"user_schoolId"},{"type":"string","name":"user_schoolUDISE_code"},{"type":"string","name":"solution_type"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_boardName"},{"type":"string","name":"district_externalId"},{"type":"string","name":"state_externalId"},{"type":"string","name":"block_externalId"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"user_type"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' -ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion" +ml_analytics_observation_batchupdate_cloud_blob_path: "observation/batchDeletion" ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" 
ml_analytics_druid_survey_query_spec : '{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' -ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" +ml_analytics_survey_batchupdate_cloud_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' -ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_secret_json_file: "gcp_secrets.json" -ml_GCP_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_Secrets: +ml_Cloud_secret_json_file: "cloud_secrets.json" +ml_Cloud_Secrets: account_name: "{{ cloud_private_storage_accountname }}" account_key: "{{ cloud_private_storage_secret }}" +cloud_private_storage_accountname: "{{ cloud_private_storage_accountname }}" +cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" +cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" +cloud_private_storage_region: "{{ cloud_private_storage_region }}" +cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index dfa015c99c..ee609b8806 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -106,7 +106,7 @@ backup: yes - name: Copy GCP Secrets to JSON file - copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" 
mode=0400 owner="{{ USER }}" + copy: dest="{{config_path}}/{{ml_Cloud_secret_json_file}}" content="{{ ml_Cloud_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" when: ML_Cloud_Service_Provider == 'gcloud' - name: Templating the shell_script_config.j2 to shell_script_config diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 770de394cb..70160c64c0 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -154,50 +154,51 @@ survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log [ORACLE] -endpoint_url = {{ ml_ORACLE_endpoint_url }} +endpoint_url = {{ cloud_private_storage_endpoint }} -access_key = {{ ml_ORACLE_access_key }} +access_key = {{ cloud_private_storage_accountname }} -secret_access_key = {{ ml_ORACLE_secret_access_key }} +secret_access_key = {{ cloud_private_storage_secret }} -region_name = {{ ml_ORACLE_region_name }} +region_name = {{ cloud_private_storage_region }} -bucket_name = {{ ml_ORACLE_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% elif ML_Cloud_Service_Provider is eq 'gcloud' %} [GCP] -secret_data = {{ ml_GCP_secret_json_file }} +secret_data = {{ ml_Cloud_secret_json_file }} -bucket_name = {{ ml_GCP_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% elif ML_Cloud_Service_Provider is eq 'aws' %} [AWS] -service_name = {{ ml_AWS_service_name }} +service_name = S3 -access_key = {{ ml_AWS_access_key }} +access_key = {{ cloud_private_storage_accountname }} -secret_access_key = {{ ml_AWS_secret_access_key }} +secret_access_key = {{ cloud_private_storage_secret }} -region_name = {{ ml_AWS_region_name }} +region_name = {{ cloud_private_storage_region }} -bucket_name = {{ ml_AWS_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% else %} [AZURE] -account_name = {{ ml_analytics_azure_account_name }} 
+account_name = {{ cloud_private_storage_accountname }} -sas_token = {{ ml_analytics_azure_sas_token }} +sas_token = {{ cloud_private_storage_secret }} -container_name = {{ ml_analytics_azure_container_name }} +container_name = {{ cloud_storage_telemetry_bucketname }} -account_key = {{ ml_analytics_azure_account_key }} +account_key = {{ cloud_private_storage_secret }} +{% endif %} [OUTPUT_DIR] @@ -250,32 +251,32 @@ program_dashboard_data = {{ ml_program_dashboard_data }} cloud_module_path = {{ ml_analytics_cloud_package_path }} -observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} +observation_blob_path = {{ ml_analytics_observation_cloud_blob_path }} -projects_blob_path = {{ ml_analytics_project_azure_blob_path }} +projects_blob_path = {{ ml_analytics_project_cloud_blob_path }} -observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} +observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_cloud_blob_path }} -observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} +observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_cloud_blob_path }} -observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} +observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path }} -projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} +projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_cloud_blob_path }} -projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_azure_blob_path }} +projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path }} -projects_rollup_blob_path = {{ ml_analytics_project_rollup_azure_blob_path }} +projects_rollup_blob_path = {{ ml_analytics_project_rollup_cloud_blob_path }} 
-observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} +observation_rollup_blob_path = {{ ml_analytics_observation_rollup_cloud_blob_path }} -survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} +survey_rollup_blob_path = {{ ml_analytics_survey_rollup_cloud_blob_path }} -survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} +survey_blob_path = {{ ml_analytics_survey_cloud_blob_path }} -projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} +projects_program_csv = {{ ml_analytics_program_dashboard_cloud_blob_path }} -observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_cloud_blob_path }} -survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_azure_blob_path}} +survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_cloud_blob_path}} cname_url = {{ ml_analytics_cname_url }} From e942e5df1f6b47b64c1c3f404952da9d31b11136 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Tue, 6 Dec 2022 14:26:47 +0530 Subject: [PATCH 121/203] Add kafka_bootstrap_address env key Add kafka_bootstrap_address env key --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index 7668715b20..c582ae8941 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -52,6 +52,7 @@ rccoreenv: KAFKA_BROKER_ID: {{ registry_kafka_broker_id|default('1')}} KAFKA_ZOOKEEPER_CONNECT: "{{groups['processing-cluster-zookeepers']|join(':2181,')}}:2181" KAFKA_ADVERTISED_LISTENERS: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" + kafka_bootstrap_address: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" 
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} From e7f3b0f70e86af5bbc42322bb3ebfb8c3009c956 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 6 Dec 2022 14:44:53 +0530 Subject: [PATCH 122/203] Issue #ED-536 feat: Added fetch all API for form --- ansible/roles/kong-api/defaults/main.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index ff4323914e..2e431aa0cb 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9815,3 +9815,24 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true + +- name: fetchAllForm + uris: "{{ data_service_prefix }}/v1/form/fetchAll" + upstream_url: "{{ player_service_url }}/plugin/v1/form/fetchAll" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - appAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ small_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ medium_request_size_limit }}" + - name: opa-checks + config.required: false + config.enabled: false From 6930cd7ec16b5bdef9be97d499f70528bdc2dca4 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 6 Dec 2022 17:34:02 +0530 Subject: [PATCH 123/203] Issue #ED-536 fix: fetch all api added --- ansible/roles/kong-api/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index 2e431aa0cb..bb6e0c7c56 100644 --- a/ansible/roles/kong-api/defaults/main.yml 
+++ b/ansible/roles/kong-api/defaults/main.yml @@ -9826,7 +9826,7 @@ kong_apis: - "{{ statsd_pulgin }}" - name: acl config.whitelist: - - appAdmin + - formUpdate - name: rate-limiting config.policy: local config.hour: "{{ small_rate_limit_per_hour }}" From 9aef1be4b81af6927643fa8acff6c30075bb5d79 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 7 Dec 2022 11:09:30 +0530 Subject: [PATCH 124/203] Update main.yml (#3649) --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index da571d3bed..a480b01ccd 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,7 +44,7 @@ ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_observation_azure_blob_path: "observation/status/" +ml_analytics_observation_cloud_blob_path: "observation/status/" ml_analytics_project_cloud_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" ml_analytics_redis_port: "{{ ml_redis_device_port | default('6379') }}" From 2bbfe4eca53e631db698d37128992434660f61a4 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 7 Dec 2022 11:25:32 +0530 Subject: [PATCH 125/203] Release 5.1.0 vars change (#3650) --- ansible/artifacts-download.yml | 11 +++++------ ansible/artifacts-upload.yml | 11 +++++------ ansible/assets-upload.yml | 4 ++-- ansible/deploy-plugins.yml | 17 +++++++++-------- ansible/desktop-faq-upload.yml | 12 ++++++------ ansible/dial_upload-schema.yml | 9 ++++----- ansible/kp_upload-schema.yml | 9 ++++----- 
ansible/plugins.yml | 10 +++++----- .../roles/cassandra-backup/defaults/main.yml | 6 +++++- ansible/roles/cassandra-backup/tasks/main.yml | 9 ++++----- .../roles/cassandra-restore/defaults/main.yml | 5 ++++- ansible/roles/cassandra-restore/tasks/main.yml | 7 +++---- ansible/roles/cert-templates/defaults/main.yml | 2 +- ansible/roles/cert-templates/tasks/main.yml | 9 ++++----- ansible/roles/desktop-deploy/defaults/main.yml | 2 +- ansible/roles/desktop-deploy/tasks/main.yml | 12 ++++++------ ansible/roles/es-azure-snapshot/tasks/main.yml | 4 ++-- .../es6/tasks/plugins/repository-azure.yml | 6 +++--- .../roles/gcp-cloud-storage/defaults/main.yml | 6 +++--- .../roles/gcp-cloud-storage/tasks/download.yml | 4 ++-- .../tasks/upload-batch-no-poll.yml | 2 +- .../gcp-cloud-storage/tasks/upload-batch.yml | 2 +- .../roles/gcp-cloud-storage/tasks/upload.yml | 2 +- ansible/roles/grafana-backup/defaults/main.yml | 6 ++++-- ansible/roles/grafana-backup/tasks/main.yml | 7 +++---- .../jenkins-backup-upload/defaults/main.yml | 5 ++++- .../roles/jenkins-backup-upload/tasks/main.yml | 7 +++---- .../log-es6/tasks/plugins/repository-azure.yml | 6 +++--- ansible/roles/mongodb-backup/defaults/main.yml | 5 ++++- ansible/roles/mongodb-backup/tasks/main.yml | 7 +++---- .../defaults/main.yml | 5 ++++- .../tasks/main.yml | 7 +++---- .../defaults/main.yml | 6 +++++- .../tasks/main.yml | 7 +++---- .../roles/postgresql-backup/defaults/main.yml | 3 +++ ansible/roles/postgresql-backup/tasks/main.yml | 9 ++++----- .../roles/postgresql-restore/defaults/main.yml | 5 ++++- ansible/roles/postgresql-restore/tasks/main.yml | 7 +++---- .../prometheus-backup-v2/defaults/main.yml | 5 ++++- .../roles/prometheus-backup-v2/tasks/main.yml | 7 +++---- .../roles/prometheus-backup/defaults/main.yml | 5 ++++- ansible/roles/prometheus-backup/tasks/main.yml | 7 +++---- .../roles/prometheus-restore/defaults/main.yml | 5 ++++- ansible/roles/prometheus-restore/tasks/main.yml | 7 +++---- 
ansible/roles/redis-backup/defaults/main.yml | 5 ++++- ansible/roles/redis-backup/tasks/main.yml | 7 +++---- ansible/uploadFAQs.yml | 4 ++-- 47 files changed, 164 insertions(+), 141 deletions(-) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 043446554d..25869f80a3 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -12,8 +12,8 @@ blob_container_name: "{{ artifacts_container }}" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" - storage_account_name: "{{ azure_artifact_storage_account_name }}" - storage_account_key: "{{ azure_artifact_storage_account_key }}" + storage_account_name: "{{ cloud_artifact_storage_accountname }}" + storage_account_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "azure" - name: download artifact from gcloud storage @@ -21,9 +21,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" - dest_folder_name: "{{ artifacts_container }}" - dest_file_name: "{{ artifact }}" + gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" @@ -38,4 +37,4 @@ aws_default_region: "{{ aws_region }}" aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 32e866808c..31f2589a68 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -13,8 +13,8 @@ container_public_access: "off" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" - storage_account_name: "{{ azure_artifact_storage_account_name }}" - storage_account_key: "{{ 
azure_artifact_storage_account_key }}" + storage_account_name: "{{ cloud_artifact_storage_accountname }}" + storage_account_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "azure" - name: upload artifact to gcloud storage @@ -22,9 +22,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" - dest_folder_name: "{{ artifacts_container }}" - dest_file_name: "{{ artifact }}" + gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" @@ -39,4 +38,4 @@ aws_default_region: "{{ aws_region }}" aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 12021680fe..8adf3cae74 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -18,8 +18,8 @@ blob_container_name: "{{ player_cdn_storage }}" container_public_access: "container" blob_container_folder_path: "" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - name: delete files and folders from azure storage using azcopy diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 6f5460809f..ae7f21637a 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -24,8 +24,8 @@ set_fact: blob_container_name: "{{ plugin_storage }}" container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ 
azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always @@ -92,8 +92,9 @@ block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ plugin_storage }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "" + tags: - always @@ -103,7 +104,7 @@ name: gcp-cloud-storage tasks_from: delete-batch.yml vars: - file_delete_pattern: "{{ dest_folder_name }}/{{ folder_name }}" + file_delete_pattern: "{{ folder_name }}" tags: - content-editor - collection-editor @@ -116,7 +117,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "{{ folder_name }}" + gcp_path: "{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -132,7 +133,7 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - dest_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + gcp_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" tags: - preview @@ -149,7 +150,7 @@ tasks_from: "{{ item[0] }}" vars: file_delete_pattern: "content-plugins/{{ item[1] }}/*" - dest_folder_path: "content-plugins/{{ item[1] }}" + gcp_path: "content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" with_nested: - ['delete-batch-no-poll.yml', 'upload-batch-no-poll.yml'] diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 911153576b..d36b0e3721 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -20,8 +20,8 @@ tasks_from: blob-upload.yml vars: container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ 
azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-desktop-faq @@ -32,8 +32,8 @@ tasks_from: blob-upload.yml vars: container_public_access: "off" - storage_account_name: "{{ azure_private_storage_account_name }}" - storage_account_key: "{{ azure_private_storage_account_key }}" + storage_account_name: "{{ cloud_private_storage_accountname }}" + storage_account_key: "{{ cloud_private_storage_secret }}" tags: - upload-label @@ -44,8 +44,8 @@ tasks_from: blob-upload-batch.yml vars: container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - upload-batch diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index f046e63462..6572c12e55 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -35,8 +35,8 @@ container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -57,9 +57,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ dial_plugin_storage }}" - dest_folder_path: "schemas/local" + gcp_bucket_name: "{{ cloud_storage_dial_bucketname }}" + gcp_path: "schemas/local" local_file_or_folder_path: "dial_schema_template_files" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" when: cloud_service_provider 
== "gcloud" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index aecdab077a..2b09dac310 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -19,8 +19,8 @@ container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -41,8 +41,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ plugin_storage }}" - dest_folder_path: "schemas/local" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "schemas/local" local_file_or_folder_path: "{{ source_name }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index ab32d9f756..fa5967b462 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -20,8 +20,8 @@ blob_delete_pattern: "content-plugins/{{ plugins_name }}" blob_container_folder_path: "/content-plugins/{{ plugins_name }}" local_file_or_folder_path: "{{ source_file }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" - name: delete batch of files from azure storage include_role: @@ -34,14 +34,14 @@ tasks_from: blob-upload-batch.yml when: cloud_service_provider == "azure" +### GCP tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ plugin_storage }}" - gcp_bucket_name: 
"{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "/content-plugins/{{ plugins_name }}" file_delete_pattern: "content-plugins/{{ plugins_name }}" - dest_folder_path: "/content-plugins/{{ plugins_name }}" local_file_or_folder_path: "{{ source_file }}" - name: delete files and folders from gcloud storage diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index 139fd1d810..dffec63096 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,5 +1,6 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' + cassandra_backup_azure_container_name: core-cassandra # This variable is added for the below reason - @@ -7,4 +8,7 @@ cassandra_backup_azure_container_name: core-cassandra # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" + +cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 507aeb190b..304385515c 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,11 +37,11 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cassandra_backup_storage }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_bucketname }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" @@ -63,9 +63,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ cassandra_backup_storage }}" - dest_folder_path: "" + gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/cassandra-restore/defaults/main.yml b/ansible/roles/cassandra-restore/defaults/main.yml index 4a4828144e..834c103d58 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ 
b/ansible/roles/cassandra-restore/defaults/main.yml @@ -5,4 +5,7 @@ user_home: "/home/{{ ansible_ssh_user }}/" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" + +cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 8a47ab7089..304c9b8b09 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -14,7 +14,7 @@ blob_container_name: "{{ cassandra_backup_storage }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -37,9 +37,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ cassandra_backup_storage }}" - dest_file_name: "{{ cassandra_restore_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" when: cloud_service_provider == "gcloud" 
diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index c621d6ddb8..1ca7f44958 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -8,4 +8,4 @@ certs_badge_key_id: "" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -cert_service_storage: "{{ cert_service_container_name }}" \ No newline at end of file +cert_service_storage: "{{ cert_service_container_name }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 78f1f769b3..0700f1e61a 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -40,8 +40,8 @@ container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - storage_account_name: "{{ azure_private_storage_account_name }}" - storage_account_key: "{{ azure_private_storage_account_key }}" + storage_account_name: "{{ cloud_private_storage_accountname }}" + storage_account_key: "{{ cloud_private_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -62,8 +62,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ cert_service_storage }}" - dest_folder_path: "" + gcp_bucket_name: "{{ cloud_storage_certservice_bucketname }}" + gcp_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - gcp_bucket_name: "{{ gcloud_private_bucket_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/desktop-deploy/defaults/main.yml 
b/ansible/roles/desktop-deploy/defaults/main.yml index 3010db2349..06bdd6fe1f 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -7,4 +7,4 @@ offline_installer_container_name: "{{env}}-offlineinstaller" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file +offline_installer_storage: "{{ offline_installer_container_name }}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 09c41300ef..70fa94cb1d 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -55,8 +55,8 @@ set_fact: blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" - name: upload batch of files to azure storage include_role: @@ -103,19 +103,19 @@ local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "aws" +### GCP Tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ offline_installer_storage }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - name: upload batch of files to gcloud storage include_role: name: 
gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "" + gcp_path: "" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" - name: upload batch of files to gcloud storage @@ -123,6 +123,6 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "latest" + gcp_path: "latest" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index 8ce0fcd267..23be535db9 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -13,8 +13,8 @@ vars: blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" - storage_account_name: "{{ azure_management_storage_account_name }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" + storage_account_key: "{{ cloud_management_storage_secret }}" - name: Create Azure Repository uri: diff --git a/ansible/roles/es6/tasks/plugins/repository-azure.yml b/ansible/roles/es6/tasks/plugins/repository-azure.yml index 170a84000e..dd7fcc3a20 100644 --- a/ansible/roles/es6/tasks/plugins/repository-azure.yml +++ b/ansible/roles/es6/tasks/plugins/repository-azure.yml @@ -1,7 +1,7 @@ --- - name: Add default azure account name for backups become: yes - shell: echo "{{ azure_management_storage_account_name }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" @@ -9,7 +9,7 @@ - name: Add default azure account key for backups become: yes - shell: echo "{{ azure_management_storage_account_key }}" | {{ es_home 
}}/bin/elasticsearch-keystore add -f azure.client.default.key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key no_log: True environment: - ES_PATH_CONF: "{{ conf_dir }}" \ No newline at end of file + ES_PATH_CONF: "{{ conf_dir }}" diff --git a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml index 086cf9c50d..b0fd847b26 100644 --- a/ansible/roles/gcp-cloud-storage/defaults/main.yml +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -10,8 +10,8 @@ gcp_storage_key_file: "" # Folder name in GCP bucket # Example - -# dest_folder_name: "my-destination-folder" -dest_folder_name: "" +# gcp_path: "my-destination-folder" +gcp_path: "" # The delete pattern to delete files and folder # Example - @@ -36,7 +36,7 @@ dest_file_name: "" # The folder path in gcloud storage to upload the files starting from the root of the bucket # This path should start with / if we provide a value for this variable since we are going to append this path as below -# {{ bucket_name }}{{ dest_folder_name }} +# {{ bucket_name }}{{ gcp_path }} # The above translates to "my-bucket/my-folder-path" # Example - # dest_folder_path: "/my-folder/json-files-folder" diff --git a/ansible/roles/gcp-cloud-storage/tasks/download.yml b/ansible/roles/gcp-cloud-storage/tasks/download.yml index c8c6e956ad..73bf76bb04 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/download.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/download.yml @@ -3,9 +3,9 @@ include_tasks: gcloud-auth.yml - name: Download from gcloud storage - shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" "{{ local_file_or_folder_path }}" + shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" "{{ local_file_or_folder_path }}" async: 3600 poll: 10 - name: Revoke gcloud access - include_tasks: gcloud-revoke.yml \ No newline at end of file + include_tasks: 
gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml index 0d8755ab26..40e9b8a66a 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml @@ -1,5 +1,5 @@ --- - name: Upload files from a local directory gcp storage - shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" async: 1800 poll: 0 diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml index 49abd5b822..dc103969aa 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Upload files from a local directory gcp storage - shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path}}" async: 3600 poll: 10 diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload.yml b/ansible/roles/gcp-cloud-storage/tasks/upload.yml index 2f88d9407f..de766a94c7 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Upload to gcloud storage - shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" + shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" async: 3600 poll: 10 diff --git a/ansible/roles/grafana-backup/defaults/main.yml 
b/ansible/roles/grafana-backup/defaults/main.yml index fc62843964..b6850bee97 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ b/ansible/roles/grafana-backup/defaults/main.yml @@ -5,10 +5,12 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' grafana_backup_azure_container_name: grafana-backup - # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" \ No newline at end of file +grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" + +cloud_storage_grafanabackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_grafanabackup_foldername: 'grafana-backup' diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 2c8520030c..c309d409fb 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -28,7 +28,7 @@ container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -50,9 +50,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: 
"{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ grafana_backup_storage }}" - dest_file_name: "{{ grafana_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" + gcp_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index 40a231d3d5..d9c5c9f58a 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -9,4 +9,7 @@ jenkins_backup_max_delay_in_days: 1 # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" \ No newline at end of file +jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" + +cloud_storage_jenkinsbackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_jenkinsbackup_foldername: 'jenkins-backup' diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index a94e57fe4a..018a9498f1 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -21,7 +21,7 @@ container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" 
storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -43,9 +43,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ jenkins_backup_storage }}" - dest_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + gcp_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" + gcp_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/log-es6/tasks/plugins/repository-azure.yml b/ansible/roles/log-es6/tasks/plugins/repository-azure.yml index 9c3b9d3774..43d512803f 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-azure.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-azure.yml @@ -1,7 +1,7 @@ --- - name: Add default azure account name for backups become: yes - shell: echo "{{ azure_management_storage_account_name }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" @@ -9,7 +9,7 @@ - name: Add default azure account key for backups become: yes - shell: echo "{{ azure_management_storage_account_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key no_log: True environment: - ES_PATH_CONF: "{{ es_conf_dir }}" \ No newline at end of file + ES_PATH_CONF: "{{ es_conf_dir }}" diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index da5a0f710f..1d54a69541 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ 
b/ansible/roles/mongodb-backup/defaults/main.yml @@ -6,4 +6,7 @@ mongo_backup_azure_container_name: "mongodb-backup" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" \ No newline at end of file +mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" + +cloud_storage_mongodbbackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_mongodbbackup_foldername: 'mongodb-backup' diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 0762f2754f..94b157648a 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -23,7 +23,7 @@ container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -45,9 +45,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ mongo_backup_storage }}" - dest_file_name: "{{ mongo_backup_file_name }}.tar.gz" + gcp_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" + gcp_path: "{{ cloud_storage_mongodbbackup_foldername }}/{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgres-managed-service-backup/defaults/main.yml 
b/ansible/roles/postgres-managed-service-backup/defaults/main.yml index 6e637bf3ce..6af37d7f96 100644 --- a/ansible/roles/postgres-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-backup/defaults/main.yml @@ -14,4 +14,7 @@ postgres_password: "{{postgres_password}}" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index ea206146b3..660814d9bd 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -50,7 +50,7 @@ container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -72,9 +72,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_backup_storage }}" - dest_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + gcp_bucket_name: "{{ 
cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgres-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml index 4ac0d62151..41164b445c 100644 --- a/ansible/roles/postgres-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-restore/defaults/main.yml @@ -18,4 +18,8 @@ postgres_env: # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" \ No newline at end of file +postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" + + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 0299ff3f73..8efa3dd561 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -17,7 +17,7 @@ blob_container_name: "{{ postgres_backup_storage }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ 
azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -39,9 +39,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgres_backup_storage }}" - dest_file_name: "{{ postgres_backup_filename }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index 0b6a9bca4a..30902eac17 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -8,3 +8,6 @@ postgresql_backup_azure_container_name: postgresql-backup # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 65116bede0..6710e49503 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -22,7 +22,7 @@ container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -44,11 +44,10 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_backup_storage }}" - dest_file_name: "{{ postgresql_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload - file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file + file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 5f0708ed34..2bcc525469 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -10,4 +10,7 @@ 
postgresql_restore_azure_container_name: postgresql-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" \ No newline at end of file +postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index 877e178987..b3411c5445 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -12,7 +12,7 @@ blob_container_name: "{{ postgresql_restore_storage }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -34,9 +34,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_restore_storage }}" - dest_file_name: "{{ postgresql_restore_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git 
a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index e3752a693f..430c9df4cb 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -7,4 +7,7 @@ prometheus_backup_azure_container_name: prometheus-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 3831080dbc..9b25fc465c 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -25,7 +25,7 @@ container_public_access: "off" blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -47,9 +47,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name 
}}.tar.gz" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index 17425092ee..3bba75124c 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -11,4 +11,7 @@ backup_storage_key: '' # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 55a51287ae..14cc74a41a 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -34,7 +34,7 @@ container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -56,9 
+56,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index bee405457a..580e865060 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -6,4 +6,7 @@ prometheus_backup_azure_container_name: prometheus-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 2232770fdd..0c9b0749a9 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -10,7 +10,7 @@ blob_container_name: "{{ prometheus_backup_storage }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -32,9 +32,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_filename }}" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index 9f6055682a..6aacb354d6 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -7,4 +7,7 @@ learner_user: learning # 2. 
We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" \ No newline at end of file +nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" + +cloud_storage_redisbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_redisbackup_foldername: nodebb-redis-backup diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 5359a362c8..9f0c15a815 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -22,7 +22,7 @@ container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -44,9 +44,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ nodebb_redis_backup_storage }}" - dest_file_name: "{{ redis_backup_file_name }}" + gcp_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" + gcp_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index b37398b874..88d17aba81 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -24,8 +24,8 @@ container_public_access: "container" 
blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" From 146c793dbbb3119dbfd4c887d819d04b117fe1f4 Mon Sep 17 00:00:00 2001 From: G33tha Date: Thu, 8 Dec 2022 13:42:45 +0530 Subject: [PATCH 126/203] updated knowlgbb service changes --- ansible/roles/stack-sunbird/defaults/main.yml | 5 +++++ pipelines/deploy/ContentFramework/Jenkinsfile | 1 + pipelines/upload/schema/dial/Jenkinsfile | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 8b600f04a1..0c8c86242d 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1048,3 +1048,8 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' # Audience claim check is disabled as of now # List of keycloak clients as these can come in audience field of a JWT token # keycloak_allowed_aud: '"{{ keycloak_auth_server_url }}/realms/{{ keycloak_realm }}", "account", "realm-management"' + + +cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" +cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' \ No newline at end of file diff --git a/pipelines/deploy/ContentFramework/Jenkinsfile b/pipelines/deploy/ContentFramework/Jenkinsfile index 
c495bce266..a02c72eb69 100644 --- a/pipelines/deploy/ContentFramework/Jenkinsfile +++ b/pipelines/deploy/ContentFramework/Jenkinsfile @@ -44,6 +44,7 @@ node() { sh """ zip -r content-editor-artifact.zip ansible/content-editor cd ansible/content-editor + sudo npm install -g gulp npm install npm install promise gulp minifyJs diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index dd74b2f23f..553dcfd840 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -28,8 +28,8 @@ node() { rm -rf sunbird-dial-service git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ - ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 012961fa2520435d977ed27a8e58ff98139e82af Mon Sep 17 00:00:00 2001 From: G33tha Date: Thu, 8 Dec 2022 13:45:41 +0530 Subject: [PATCH 127/203] updated knowlgbb service changes --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 553dcfd840..a91956eaf1 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -28,7 +28,7 @@ node() { rm -rf sunbird-dial-service git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ - ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" + ansiblePlaybook = 
"${currentWs}/ansible/dial_upload-schema.yml" ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) From 5db7065a368ddd6aeb0df17500d177cea76376a0 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Thu, 8 Dec 2022 13:55:03 +0530 Subject: [PATCH 128/203] Issue #KN-439 chore: Mering the changes from release-5.2.0-knowlg to release-5.2.0 --- ansible/roles/kong-api/defaults/main.yml | 167 +++++++++++++++++- .../content-service_application.conf | 11 +- .../templates/dial-service_application.conf | 10 ++ .../taxonomy-service_application.conf | 8 + 4 files changed, 194 insertions(+), 2 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index bb6e0c7c56..d7589ae22e 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -127,6 +127,7 @@ assessment_prefix: /assessment # Service URLs knowledge_mw_service_url: "http://knowledge-mw-service:5000" learning_service_url: "http://learner-service:9000" +dial_service_url: "http://dial-service:9000" vm_learning_service_url: "http://{{learningservice_ip}}:8080/learning-service" telemetry_service_url: "http://telemetry-service:9001" player_service_url: "http://player:3000" @@ -2494,7 +2495,7 @@ kong_apis: - name: publishContent uris: "{{ content_prefix }}/v1/publish" - upstream_url: "{{ knowledge_mw_service_url }}/v1/content/publish" + upstream_url: "{{ content_service_url }}/content/v3/publish" strip_uri: true plugins: - name: jwt @@ -9836,3 +9837,167 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false + +- name: releaseDialcodeContentV2 + uris: "{{ content_prefix }}/v2/dialcode/release" + upstream_url: "{{ content_service_url }}/content/v4/dialcode/release" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - 
name: acl + config.whitelist: + - contentUpdate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: releaseDialcodeCollection + uris: "{{ collection_prefix }}/v1/dialcode/release" + upstream_url: "{{ content_service_url }}/collection/v4/dialcode/release" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentUpdate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: publishContentV2 + uris: "{{ content_prefix }}/v2/publish" + upstream_url: "{{ content_service_url }}/content/v4/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishContent + uris: "{{ content_prefix }}/v1/unlisted/publish" + upstream_url: "{{ content_service_url }}/content/v3/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ 
small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishContentV2 + uris: "{{ content_prefix }}/v2/unlisted/publish" + upstream_url: "{{ content_service_url }}/content/v4/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: publishCollection + uris: "{{ collection_prefix }}/v1/publish" + upstream_url: "{{ content_service_url }}/collection/v4/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishCollection + uris: "{{ collection_prefix }}/v1/unlisted/publish" + upstream_url: "{{ content_service_url }}/collection/v4/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: readDIALCodesBatchInfo + uris: "{{ dialcode_service_prefix }}/v2/read/batch" + upstream_url: "{{ dial_service_url }}/dialcode/v4/batch/read" + 
strip_uri: true + plugins: + - name: cors + - "{{ statsd_pulgin }}" + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: ip + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index fb5a2e7667..bb44a71828 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -382,7 +382,7 @@ content { } h5p { library { - path: "{{ h5p_library_path }}" + path: "{{ h5p_library_path | default('https://sunbirddev.blob.core.windows.net/sunbird-content-dev/h5p-standalone-1.3.4.zip') }}" } } copy { @@ -494,6 +494,7 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" + publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config @@ -637,3 +638,11 @@ collection { } plugin.media.base.url="{{ plugin_media_base_url }}" + +cloudstorage { + metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + relative_path_prefix={{ cloudstorage_relative_path_prefix_content }} + metadata.list={{ cloudstorage_metadata_list }} + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} diff --git a/ansible/roles/stack-sunbird/templates/dial-service_application.conf b/ansible/roles/stack-sunbird/templates/dial-service_application.conf index 745a8b9bfe..dd7b11dbeb 100644 --- a/ansible/roles/stack-sunbird/templates/dial-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/dial-service_application.conf @@ -150,6 +150,10 @@ system.config.table="system_config" 
publisher.keyspace.name="{{ env_name }}_dialcode_store" publisher.keyspace.table="publisher" +#QRCodes Configuration +qrcodes.keyspace.name="dialcodes" +qrcodes.keyspace.table="dialcode_batch" + #DIAL Code Generator Configuration dialcode.strip.chars="0" dialcode.length=6.0 @@ -191,3 +195,9 @@ jsonld { sb_schema = ["http://store.knowlg.sunbird.org/dial/specs/sb/schema.jsonld"] } +cloudstorage { + metadata.replace_absolute_path="{{ cloudstorage_replace_absolute_path | default('false') }}" + relative_path_prefix="{{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }}" + read_base_path="{{ cloudstorage_base_path }}" +} +cloud_storage_container="{{ cloud_storage_dial_bucketname | default('dial') }}" diff --git a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf index e1298a1b92..332206c502 100644 --- a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf @@ -398,3 +398,11 @@ objectcategorydefinition.keyspace="{{ lp_cassandra_keyspace_prefix }}_category_s # Framework master category validation Supported values are Yes/No master.category.validation.enabled="{{ master_category_validation_enabled | default('Yes') }}" + +cloudstorage { + metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + relative_path_prefix={{ cloudstorage_relative_path_prefix_content }} + metadata.list={{ cloudstorage_metadata_list }} + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} From 484731bcdbe3106a1c9f83a971369e9046267dce Mon Sep 17 00:00:00 2001 From: swayangjit Date: Thu, 8 Dec 2022 15:52:41 +0530 Subject: [PATCH 129/203] Issue #ED-354 chore: Update aab firebase upload logic. 
--- ansible/roles/firebase_deploy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/firebase_deploy/tasks/main.yml b/ansible/roles/firebase_deploy/tasks/main.yml index 0403ff7dab..8ebd281250 100644 --- a/ansible/roles/firebase_deploy/tasks/main.yml +++ b/ansible/roles/firebase_deploy/tasks/main.yml @@ -9,7 +9,7 @@ src: "uploadToGdrive.sh" dest: "./uploadToGdrive.sh" - name: Uploading build to {{ env_name }} firebase - shell: find ../ -maxdepth 1 -iregex ".*{{env_name}}.apk" -exec bash deployToFirebase.sh {} \; + shell: find ../ -maxdepth 1 -iregex ".*{{env_name}}.*.aab" -exec bash deployToFirebase.sh {} \; when: env_name!='production' - name: Uploading build to {{ env_name }} Gdrive shell: find ../ -maxdepth 1 -iregex ".*[0-9].apk" -exec bash uploadToGdrive.sh -v -r {} \; From b14d6de817d42373514f6013cdf83f8b4108ad17 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Thu, 8 Dec 2022 20:45:54 +0530 Subject: [PATCH 130/203] Release 5.1.0 vars change (#3658) --- ansible/artifacts-download.yml | 12 +- ansible/artifacts-upload.yml | 12 +- ansible/assets-upload.yml | 27 ++-- ansible/deploy-plugins.yml | 25 ++-- ansible/desktop-faq-upload.yml | 120 ++++++++++++++---- ansible/dial_upload-schema.yml | 19 +-- ansible/inventory/env/group_vars/all.yml | 1 - ansible/kp_upload-schema.yml | 19 +-- .../roles/cassandra-backup/defaults/main.yml | 10 -- ansible/roles/cassandra-backup/tasks/main.yml | 12 +- .../roles/cassandra-restore/defaults/main.yml | 7 - .../roles/cassandra-restore/tasks/main.yml | 14 +- .../roles/cert-templates/defaults/main.yml | 7 - ansible/roles/cert-templates/tasks/main.yml | 12 +- .../roles/desktop-deploy/defaults/main.yml | 7 - ansible/roles/desktop-deploy/tasks/main.yml | 10 +- .../roles/grafana-backup/defaults/main.yml | 7 - ansible/roles/grafana-backup/tasks/main.yml | 14 +- .../jenkins-backup-upload/defaults/main.yml | 8 -- .../jenkins-backup-upload/tasks/main.yml 
| 14 +- .../roles/mongodb-backup/defaults/main.yml | 8 -- ansible/roles/mongodb-backup/tasks/main.yml | 12 +- .../defaults/main.yml | 9 -- .../tasks/main.yml | 14 +- .../defaults/main.yml | 10 -- .../tasks/main.yml | 14 +- .../roles/postgresql-backup/defaults/main.yml | 8 -- .../roles/postgresql-backup/tasks/main.yml | 14 +- .../postgresql-restore/defaults/main.yml | 8 -- .../roles/postgresql-restore/tasks/main.yml | 14 +- .../prometheus-backup-v2/defaults/main.yml | 8 -- .../roles/prometheus-backup-v2/tasks/main.yml | 12 +- .../roles/prometheus-backup/defaults/main.yml | 9 -- .../roles/prometheus-backup/tasks/main.yml | 14 +- .../prometheus-restore/defaults/main.yml | 9 -- .../roles/prometheus-restore/tasks/main.yml | 14 +- ansible/roles/redis-backup/defaults/main.yml | 8 -- ansible/roles/redis-backup/tasks/main.yml | 14 +- ansible/uploadFAQs.yml | 62 +++++++-- .../UploadCollectionHierarchyCSV/config.xml | 11 +- .../jobs/UploadChatbotConfig/config.xml | 11 +- .../UploadCollectionHierarchyCSV/config.xml | 13 +- .../jobs/UploadDiscussionUIDocs/config.xml | 11 +- .../Kubernetes/jobs/UploadFAQs/config.xml | 9 -- .../jobs/UploadPortalLabel/config.xml | 9 -- .../Sunbird-RC/jobs/UploadRCSchema/config.xml | 9 -- pipelines/deploy/desktop-faq/Jenkinsfile | 2 +- pipelines/upload/chatbot/Jenkinsfile | 2 +- pipelines/upload/discussion-UI/Jenkinsfile | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- pipelines/upload/portal-csv/Jenkinsfile | 2 +- 51 files changed, 300 insertions(+), 401 deletions(-) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 25869f80a3..46167180e4 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ artifacts_container }}" + blob_container_name: "{{ cloud_storage_artifacts_bucketname }}" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ 
cloud_artifact_storage_accountname }}" @@ -32,9 +32,9 @@ tasks_from: download.yml vars: local_file_or_folder_path: "{{ artifact_path }}" - s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" - s3_path: "{{ artifacts_container }}/{{ artifact }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" - aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + s3_path: "{{ artifact }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_artifact_storage_accountname }}" + aws_secret_access_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "aws" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 31f2589a68..3bdbe73017 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ artifacts_container }}" + blob_container_name: "{{ cloud_storage_artifacts_bucketname }}" container_public_access: "off" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" @@ -33,9 +33,9 @@ tasks_from: upload.yml vars: local_file_or_folder_path: "{{ artifact_path }}" - s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" - s3_path: "{{ artifacts_container }}/{{ artifact }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" - aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + s3_path: "{{ artifact }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_artifact_storage_accountname }}" + aws_secret_access_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "aws" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 8adf3cae74..2d8d4b1bc2 
100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -2,20 +2,13 @@ - hosts: localhost vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - player_cdn_storage: "{{ player_cdn_container }}" # Azure tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ player_cdn_storage }}" + blob_container_name: "{{ cloud_storage_playercdn_bucketname }}" container_public_access: "container" blob_container_folder_path: "" storage_account_name: "{{ cloud_public_storage_accountname }}" @@ -40,11 +33,11 @@ block: - name: set common aws variables set_fact: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - s3_path: "{{ player_cdn_storage }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" + s3_path: "" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" - name: delete files and folders from s3 include_role: @@ -64,10 +57,10 @@ block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ player_cdn_storage }}" - dest_folder_path: "" - file_delete_pattern: "{{ 
player_cdn_storage }}/" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" + gcp_path: "" + file_delete_pattern: "" + - name: delete files and folders from gcloud storage include_role: diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index ae7f21637a..6d048b18c4 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -2,13 +2,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - plugin_storage: "{{ plugin_container_name }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | @@ -22,7 +15,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_storage }}" + blob_container_name: "{{ cloud_storage_content_bucketname }}" container_public_access: "container" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" @@ -169,10 +162,10 @@ block: - name: set common aws variables set_fact: - aws_default_region: "{{ aws_region }}" - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ cloud_public_storage_region }}" + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + 
aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - always @@ -182,7 +175,7 @@ name: aws-cloud-storage tasks_from: delete-folder.yml vars: - s3_path: "{{ plugin_storage }}/{{ folder_name }}" + s3_path: "{{ folder_name }}" tags: - content-editor - collection-editor @@ -195,7 +188,7 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_path: "{{ plugin_storage }}/{{ folder_name }}" + s3_path: "{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -211,14 +204,14 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_path: "{{ plugin_storage }}/artefacts/content-player/content-player-{{ player_version_number }}.zip" + s3_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" tags: - preview - block: - name: run the s3_copy.sh script - shell: "bash {{ s3_file_path }} {{ plugin_storage }} {{ source_file }} {{ aws_public_s3_bucket_name }}" + shell: "bash {{ s3_file_path }} {{ source_file }} {{ cloud_public_storage_accountname }}" async: 3600 poll: 10 environment: diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index d36b0e3721..3683202043 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -6,7 +6,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ upload_storage }}" + blob_container_name: "" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" @@ -20,6 +20,7 @@ tasks_from: blob-upload.yml vars: container_public_access: "container" + blob_container_name: "{{ cloud_storage_public_bucketname }}" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" tags: @@ -32,32 +33,58 @@ tasks_from: blob-upload.yml vars: container_public_access: "off" + 
blob_container_name: "{{ cloud_storage_label_bucketname }}" storage_account_name: "{{ cloud_private_storage_accountname }}" storage_account_key: "{{ cloud_private_storage_secret }}" tags: - upload-label - block: - - name: upload batch of files to azure storage + - name: upload batch of files to azure storage - chatbot include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: container_public_access: "container" + blob_container_name: "{{ cloud_storage_chatbot_bucketname }}" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - - upload-batch + + - block: + - name: upload batch of files to azure storage - csv-template + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + blob_container_name: "{{ cloud_storage_sourcing_bucketname }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-csv-template + + - block: + - name: upload batch of files to azure storage - discussion-ui + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + blob_container_name: "{{ cloud_storage_discussionui_bucketname }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-discussion-ui when: cloud_service_provider == "azure" +### GCP tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ upload_storage }}" - dest_file_name: "{{ destination_path }}" - dest_folder_path: "{{ destination_path }}" + gcp_path: "{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" tags: - always @@ -68,7 +95,7 @@ name: gcp-cloud-storage 
tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" tags: - upload-desktop-faq @@ -78,20 +105,39 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_private_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_label_bucketname }}" tags: - upload-label - block: - - name: upload batch of files to gcloud storage + - name: upload batch of files to gcloud storage - chatbot include_role: name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" tags: - upload-chatbot-config - - upload-batch + + - block: + - name: upload batch of files to gcloud storage - csv-template + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" + tags: + - upload-csv-template + + - block: + - name: upload batch of files to gcloud storage - discussion-ui + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" + tags: + - upload-discussion-ui when: cloud_service_provider == "gcloud" ######################## AWS tasks ######################################### @@ -100,9 +146,9 @@ block: - name: set common aws variables set_fact: - aws_default_region: "{{ aws_region }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" - s3_path: "{{ upload_storage }}/{{ destination_path }}" + s3_path: "{{ destination_path }}" tags: - always @@ -112,9 +158,9 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ 
cloud_storage_public_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - upload-desktop-faq @@ -124,23 +170,45 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_private_s3_bucket_name }}" - aws_access_key_id: "{{ aws_private_bucket_access_key }}" - aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_label_bucketname }}" + aws_access_key_id: "{{ cloud_private_storage_accountname }}" + aws_secret_access_key: "{{ cloud_private_storage_secret }}" tags: - upload-label - block: - - name: upload folder to aws s3 + - name: upload folder to aws s3 - chatbot include_role: name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - - upload-batch - when: cloud_service_provider == "aws" - \ No newline at end of file + + - block: + - name: upload folder to aws s3 - csv-template + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-csv-template + + - block: + - name: upload folder to aws s3 - discussion-ui + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + tags: + - 
upload-discussion-ui + when: cloud_service_provider == "aws" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 6572c12e55..757a80f6e5 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -2,13 +2,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - dial_plugin_storage: "{{ dial_plugin_container_name }}" tasks: - name: Create directories file: @@ -31,7 +24,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ dial_plugin_storage }}" + blob_container_name: "{{ cloud_storage_dial_bucketname }}" container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" @@ -44,12 +37,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_dial_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "dial_schema_template_files" - s3_path: "{{ dial_plugin_storage }}/schemas/local" + s3_path: "schemas/local" 
when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 62cac6441d..8dfdd8a43d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -128,7 +128,6 @@ cassandra_version: '3.9' cassandra_port: 9042 cassandra_rpc_address: 0.0.0.0 cassandra_restore_dir: "/home/{{ ansible_ssh_user }}/" -cassandra_backup_azure_container_name: cassandra-backup cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### cassandra_multi_dc_enabled: false diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 2b09dac310..d12b74433d 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -2,20 +2,13 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - plugin_storage: "{{ plugin_container_name }}" tasks: - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ plugin_storage }}" + blob_container_name: "{{ cloud_storage_content_bucketname }}" container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" @@ -28,12 +21,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ source_name }}" - s3_path: "{{ plugin_storage }}/schemas/local" + s3_path: "schemas/local" when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index dffec63096..4481570cc6 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,14 +1,4 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' - -cassandra_backup_azure_container_name: core-cassandra - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. 
Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" - cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 304385515c..ce0e646662 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,7 +37,7 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" @@ -51,11 +51,11 @@ tasks_from: upload-folder.yml vars: local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - s3_path: "{{ cassandra_backup_storage }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + s3_path: "{{ cloud_storage_cassandrabackup_foldername }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/cassandra-restore/defaults/main.yml 
b/ansible/roles/cassandra-restore/defaults/main.yml index 834c103d58..9ac0c38f95 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ b/ansible/roles/cassandra-restore/defaults/main.yml @@ -1,11 +1,4 @@ user_home: "/home/{{ ansible_ssh_user }}/" -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" - cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 304c9b8b09..4bd8c05991 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -11,11 +11,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ cassandra_backup_storage }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -24,12 +24,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ 
aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" - s3_path: "{{ cassandra_backup_storage }}/{{ cassandra_restore_gzip_file_name }}" + s3_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index 1ca7f44958..c8710dd9d9 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -2,10 +2,3 @@ certs_badge_upload_retry_count: 3 certs_badge_criteria: "" certs_badge_batch_id: "" certs_badge_key_id: "" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -cert_service_storage: "{{ cert_service_container_name }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 0700f1e61a..0caf2b1bfe 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -36,7 +36,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ cert_service_storage }}" + blob_container_name: "{{ cloud_storage_certservice_bucketname }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" @@ -49,12 +49,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_private_s3_bucket_name }}" - aws_access_key_id: "{{ aws_private_bucket_access_key }}" - aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_certservice_bucketname }}" + aws_access_key_id: "{{ cloud_private_storage_accountname }}" + aws_secret_access_key: "{{ cloud_private_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - s3_path: "{{ cert_service_storage }}" + s3_path: "" when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index 06bdd6fe1f..2cff6657c7 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,10 +1,3 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" offline_installer_container_name: "{{env}}-offlineinstaller" - -# This variable is added for the below reason - -# 1. 
Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 70fa94cb1d..ba077b778f 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -53,7 +53,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_storage }}" + blob_container_name: "{{ cloud_storage_offlineinstaller_bucketname }}" container_public_access: "blob" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" @@ -81,10 +81,10 @@ block: - name: set common aws variables set_fact: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" - name: upload batch of files to aws s3 include_role: diff --git a/ansible/roles/grafana-backup/defaults/main.yml b/ansible/roles/grafana-backup/defaults/main.yml index b6850bee97..70bd76ff82 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ 
b/ansible/roles/grafana-backup/defaults/main.yml @@ -4,13 +4,6 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db # Override these values in group_vars sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' -grafana_backup_azure_container_name: grafana-backup -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" cloud_storage_grafanabackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_grafanabackup_foldername: 'grafana-backup' diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index c309d409fb..90dc3526ca 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -24,12 +24,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ grafana_backup_storage }}" + blob_container_name: "{{ cloud_storage_grafanabackup_foldername }}" container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -37,12 +37,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - 
s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" - s3_path: "{{ grafana_backup_storage }}/{{ grafana_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index d9c5c9f58a..9fd90050bf 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -1,15 +1,7 @@ jenkins_user: jenkins jenkins_group: jenkins jenkins_backup_base_dir: /var/lib/jenkins/jenkins-backup -jenkins_backup_azure_container_name: jenkins-backup jenkins_backup_max_delay_in_days: 1 -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" - cloud_storage_jenkinsbackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_jenkinsbackup_foldername: 'jenkins-backup' diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 018a9498f1..89d8f3e29c 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -17,12 +17,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ jenkins_backup_storage }}" + blob_container_name: "{{ cloud_storage_jenkinsbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -30,12 +30,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - s3_path: "{{ jenkins_backup_storage }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" + s3_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout 
}}.zip" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index 1d54a69541..547137f0ca 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,12 +1,4 @@ mongo_backup_dir: '/tmp/mongo-backup' -mongo_backup_azure_container_name: "mongodb-backup" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" cloud_storage_mongodbbackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_mongodbbackup_foldername: 'mongodb-backup' diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 94b157648a..f51216b14f 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -19,12 +19,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ mongo_backup_storage }}" + blob_container_name: "{{ cloud_storage_mongodbbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: 
cloud_service_provider == "azure" - name: upload file to aws s3 @@ -32,10 +32,10 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" s3_path: "{{ mongo_backup_storage }}/{{ mongo_backup_file_name }}.tar.gz" when: cloud_service_provider == "aws" diff --git a/ansible/roles/postgres-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-managed-service-backup/defaults/main.yml index 6af37d7f96..ed62efd66b 100644 --- a/ansible/roles/postgres-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-backup/defaults/main.yml @@ -1,7 +1,5 @@ postgresql_user: postgres postgresql_backup_dir: /tmp/postgres -postgresql_backup_azure_container_name: postgresql-backup - db_name: db: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] @@ -9,12 +7,5 @@ postgres_admin_user: "{{sunbird_pg_user}}" postgres_hostname: "{{groups['postgresql-master-1'][0]}}" postgres_password: "{{postgres_password}}" -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" - cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index 660814d9bd..ba101e2509 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -46,12 +46,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -59,12 +59,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}.zip" + 
s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}.zip" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/postgres-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml index 41164b445c..8893425000 100644 --- a/ansible/roles/postgres-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-restore/defaults/main.yml @@ -1,6 +1,4 @@ postgresql_restore_dir: /tmp/postgres-restore -postgres_backup_azure_container_name: postgresql-backup - db: name: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] role: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] @@ -13,13 +11,5 @@ postgres_password: postgres_hostname: postgres_env: -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" - - cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 8efa3dd561..c3d518db56 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -14,11 +14,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgres_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -26,12 +26,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_management_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" - s3_path: "{{ postgres_backup_storage }}/{{ postgres_backup_filename }}" + s3_path: "{{ 
cloud_storage_postgresqlbackup_foldername }}/{{ postgres_backup_filename }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index 30902eac17..341b1c23ed 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -1,13 +1,5 @@ postgresql_backup_dir: /tmp/postgresql-backup postgresql_user: postgres -postgresql_backup_azure_container_name: postgresql-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 6710e49503..5b3303bf97 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -18,12 +18,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -31,12 +31,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgresql_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 2bcc525469..feeed7d6bb 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -3,14 +3,6 @@ postgresql_user: postgres postgresql_port: 5432 postgresql_cluster_version: 9.5 postgresql_cluster_name: main -postgresql_restore_azure_container_name: postgresql-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index b3411c5445..e076590f23 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -9,11 +9,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgresql_restore_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -21,12 +21,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" - s3_path: "{{ postgres_backup_storage }}/{{ postgresql_restore_gzip_file_name }}" + s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgresql_restore_gzip_file_name }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index 430c9df4cb..919dcd82d9 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -1,13 +1,5 @@ --- # defaults file for ansible/roles/prometheus-backup-v2 -prometheus_backup_azure_container_name: prometheus-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 9b25fc465c..4a65bb6f8f 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -26,7 +26,7 @@ blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: 
upload file to aws s3 @@ -34,12 +34,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index 3bba75124c..e5a4ecdcb3 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -1,17 +1,8 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup - # Set these vars per environment as show in example below # Override these values in group_vars backup_storage_name: backups backup_storage_key: '' -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" - cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 14cc74a41a..10d8e2fb3b 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -30,12 +30,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_storage }}" + blob_container_name: "{{ cloud_storage_prometheusbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -43,12 +43,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ 
prometheus_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index 580e865060..f5f1511216 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -1,12 +1,3 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" - cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 0c9b0749a9..440b777fe4 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -7,11 +7,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ prometheus_backup_storage }}" + blob_container_name: "{{ cloud_storage_prometheusbackup_foldername }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ 
azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -19,12 +19,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_filename }}" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index 6aacb354d6..54b7c60a89 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -1,13 +1,5 @@ redis_backup_dir: /tmp/redis-backup -nodebb_redis_backup_azure_container_name: nodebb-redis-backup learner_user: learning -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" - cloud_storage_redisbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_redisbackup_foldername: nodebb-redis-backup diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 9f0c15a815..f1cf35622f 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -18,12 +18,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ nodebb_redis_backup_storage }}" + blob_container_name: "{{ cloud_storage_redisbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -31,12 +31,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" - s3_path: "{{ nodebb_redis_backup_storage }}/{{ redis_backup_file_name }}" + s3_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" when: cloud_service_provider == "aws" - name: upload file 
to gcloud storage diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 88d17aba81..cf90e343d1 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ upload_storage }}" + blob_container_name: "{{ cloud_storage_public_bucketname }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" @@ -35,12 +35,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_public_bucketname }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - s3_path: "{{ upload_storage }}" + s3_path: "" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "aws" @@ -50,13 +50,59 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ upload_storage }}" + gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" dest_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "gcloud" tags: - upload-faqs + +- hosts: localhost + vars_files: + - "{{inventory_dir}}/secrets.yml" + tasks: + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ cloud_storage_content_bucketname }}" + container_public_access: "container" + 
blob_container_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "azure" + + - name: upload batch of files to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + s3_path: "" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "aws" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "gcloud" + tags: - upload-RC-schema diff --git a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 0236cab0eb..1363bd3fcf 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -83,7 +83,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-csv-template @@ -102,15 +102,6 @@ return """<b>This parameter is not used</b>""" false - - upload_storage - - - - content-service - - - source_path diff --git 
a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index defc3a0ddd..79d963a0e7 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -84,15 +84,6 @@ return """<b>This parameter is not used</b>""" master false - - upload_storage - - - - chatbot - - - source_path @@ -169,4 +160,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index d87aac4ee3..72d310489e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -83,7 +83,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-csv-template @@ -102,15 +102,6 @@ return """<b>This parameter is not used</b>""" false - - upload_storage - - - - sourcing - - - source_path @@ -175,4 +166,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml index a801645925..66d749e86a 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -159,15 +159,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - discussion-ui - - - 
source_path @@ -191,7 +182,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-discussion-ui diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index 85b7c81efb..9a6fccc4b5 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - public - - - tag diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml index a75d9ee220..a1b8680986 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - label - - - destination_path diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml index ea47b8d14e..1ff2974d6d 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - sunbird-content-dev - - - tag diff --git a/pipelines/deploy/desktop-faq/Jenkinsfile b/pipelines/deploy/desktop-faq/Jenkinsfile index d282ec2884..1b1a8d7f0d 100644 --- a/pipelines/deploy/desktop-faq/Jenkinsfile +++ b/pipelines/deploy/desktop-faq/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = 
sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index c97597c44c..da0774f382 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -38,7 +38,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/discussion-UI/Jenkinsfile b/pipelines/upload/discussion-UI/Jenkinsfile index c4d794fb3e..067158e445 100644 --- a/pipelines/upload/discussion-UI/Jenkinsfile +++ b/pipelines/upload/discussion-UI/Jenkinsfile @@ -30,7 +30,7 @@ node() { unzip 
${artifact} """ ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index 4f18801b4e..f44c1b5020 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"upload_storage=${params.upload_storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \" source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index 6e8453d3e2..502fadcdbb 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -27,7 +27,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" 
upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 8f5efcd39ec159cd90593895d679bcf25c9d85ec Mon Sep 17 00:00:00 2001 From: Kumar Gauraw Date: Mon, 12 Dec 2022 15:22:04 +0530 Subject: [PATCH 131/203] Issue #IQ-193 feat: updated config of assessment --- ansible/roles/stack-sunbird/defaults/main.yml | 12 +++++- .../assessment-service_application.conf | 39 ++++++++++--------- 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 0c8c86242d..036fda51bd 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1052,4 +1052,14 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" -cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' \ No newline at end of file +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' + +### inQuiry assessment 
service default values +inquiry_schema_path: "{{ kp_schema_base_path }}" +inquiry_kafka_urls: "{{ kafka_urls }}" +inquiry_assessment_import_kafka_topic_name: "{{ env_name }}.object.import.request" +inquiry_assessment_publish_kafka_topic_name: "{{ env_name }}.assessment.publish.request" +inquiry_cassandra_connection: "{{ lp_cassandra_connection }}" +inquiry_cassandra_keyspace_prefix: "{{ lp_cassandra_keyspace_prefix }}" +inquiry_redis_host: "{{ sunbird_lp_redis_host }}" +inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf index 60d129907a..c06a44f2d8 100644 --- a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf @@ -336,14 +336,13 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path="{{ kp_schema_base_path | default('/home/sunbird/assessment-service-1.0-SNAPSHOT/schemas')}}" +schema.base_path="{{ inquiry_schema_path | default('/home/sunbird/assessment-service-1.0-SNAPSHOT/schemas')}}" # Cassandra Configuration -cassandra.lp.connection="{{ lp_cassandra_connection }}" -content.keyspace = "{{ lp_cassandra_keyspace_prefix }}_content_store" +cassandra.lp.connection="{{ inquiry_cassandra_connection }}" # Redis Configuration -redis.host="{{ sunbird_lp_redis_host }}" +redis.host="{{ inquiry_redis_host }}" redis.port=6379 redis.maxConnections=128 @@ -383,30 +382,24 @@ languageCode { telugu : "te" } -cloud_storage_type: "{{ cloud_service_provider }}" -cloud_storage_key: "{{ cloud_public_storage_accountname }}" -cloud_storage_secret: "{{ cloud_public_storage_secret }}" -cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" -cloud_storage_container: "{{ 
cloud_storage_content_bucketname }}" - kafka { - urls : "{{ kafka_urls }}" + urls : "{{ inquiry_kafka_urls }}" topic.send.enable : true - topics.instruction : "{{ env_name }}.assessment.publish.request" + topics.instruction : "{{ inquiry_assessment_publish_kafka_topic_name }}" } -objectcategorydefinition.keyspace="{{ lp_cassandra_keyspace_prefix }}_category_store" -question.keyspace="{{ lp_cassandra_keyspace_prefix }}_question_store" -questionset.keyspace="{{ lp_cassandra_keyspace_prefix }}_hierarchy_store" +objectcategorydefinition.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_category_store" +question.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_question_store" +questionset.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_hierarchy_store" composite { search { - url : "{{ sunbird_search_service_api_base_url }}/v3/search" + url : "{{ inquiry_search_service_base_url }}" } } import { request_size_limit : 300 - output_topic_name : "{{ env_name }}.object.import.request" + output_topic_name : "{{ inquiry_assessment_import_kafka_topic_name }}" required_props { question : ["name", "code", "mimeType", "framework", "channel"] questionset : ["name", "code", "mimeType", "framework", "channel"] @@ -426,4 +419,14 @@ assessment.copy.props_to_remove=["downloadUrl", "artifactUrl", "variants", "LastPublishedBy", "rejectReasons", "rejectComment", "gradeLevel", "subject", "medium", "board", "topic", "purpose", "subtopic", "contentCredits", "owner", "collaborators", "creators", "contributors", "badgeAssertions", "dialcodes", - "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl"] \ No newline at end of file + "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl"] + +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" + +cloudstorage { + 
metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + metadata.list={{ cloudstorage_metadata_list }} + relative_path_prefix="{{ cloudstorage_relative_path_prefix | default('CLOUD_STORAGE_BASE_PATH') }}" + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} \ No newline at end of file From 08abafc3e0ae706d7ad8b3715cbac627f3579271 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 <52481775+Ashwiniev95@users.noreply.github.com> Date: Mon, 12 Dec 2022 18:03:09 +0530 Subject: [PATCH 132/203] Add a new variable (#3661) --- ansible/roles/ml-analytics-service/defaults/main.yml | 1 + ansible/roles/ml-analytics-service/templates/config.j2 | 1 + 2 files changed, 2 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index a480b01ccd..6c9ea27e79 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -112,3 +112,4 @@ cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" cloud_private_storage_region: "{{ cloud_private_storage_region }}" cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" +ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 70160c64c0..aa365dbb0c 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -232,6 +232,7 @@ survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} +program_text_file = {{ml_analytics_project_program}} [SLACK] From 0c14ae91c89dec4ce69ec3e7cdb728d992c14db4 Mon Sep 17 00:00:00 2001 
From: Raghupathi Date: Tue, 13 Dec 2022 16:22:16 +0530 Subject: [PATCH 133/203] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6c9ea27e79..6c40b8b6c0 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -102,7 +102,7 @@ ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" -ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" +ml_analytics_cname_url: "{{ cloud_storage_url }}/{{ cloud_storage_samiksha_bucketname }}" ml_Cloud_secret_json_file: "cloud_secrets.json" ml_Cloud_Secrets: account_name: "{{ cloud_private_storage_accountname }}" @@ -113,3 +113,4 @@ cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" cloud_private_storage_region: "{{ cloud_private_storage_region }}" cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" 
+ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" From f4f545de049adfdf53501fd59669c17acc6cf73b Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 13 Dec 2022 16:23:11 +0530 Subject: [PATCH 134/203] Update shell_script_config.j2 --- .../ml-analytics-service/templates/shell_script_config.j2 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 b/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 index 6ecdeba31a..97e35a4db0 100644 --- a/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 +++ b/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 @@ -1,2 +1 @@ -mongo_url={{ ml_analytics_mongodb_url }} -mongo_db_name={{ ml_analytics_mongo_db_name }} +projects_program_filename={{ ml_analytics_projects_program_filename }} From ddeeff6721ef6460b7de5fd40d4f4c299c755aa7 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 13 Dec 2022 17:01:35 +0530 Subject: [PATCH 135/203] Update Ingestion Spec --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6c40b8b6c0..06fb010cdf 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -82,7 +82,7 @@ ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/pro ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" ml_analytics_survey_cloud_blob_path : "survey/status/" -ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' +ml_analytics_druid_survey_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", 
"survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program","state_code","school_code","district_code","block_code","cluster_code"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" From fb320160c4fa076197b974d9d6308d21239bb813 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 14 Dec 2022 09:36:22 +0530 Subject: [PATCH 136/203] Remove SAS token --- ansible/roles/ml-analytics-service/templates/config.j2 | 2 -- 1 file changed, 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index aa365dbb0c..c0ec68fd3e 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -192,8 +192,6 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} account_name = {{ cloud_private_storage_accountname }} -sas_token = {{ cloud_private_storage_secret }} - container_name = {{ cloud_storage_telemetry_bucketname }} account_key = {{ cloud_private_storage_secret }} From c0873da4e7fbcba65ca315179f98e5dd26c39c00 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 14 Dec 2022 13:06:34 +0530 Subject: [PATCH 137/203] Missing vars update (#3666) --- ansible/roles/ml-analytics-service/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 06fb010cdf..33d37d02a6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -83,8 +83,8 @@ ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCo ml_analytics_survey_status_output_dir : "{{ 
WORKDIR }}/source/survey/status/output" ml_analytics_survey_cloud_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program","state_code","school_code","district_code","block_code","cluster_code"]},"metricsSpec":[]}}}' -ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" -ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" +ml_slack_token: "{{ ml_analytics_slack_token | default('') }}" +ml_slack_channel: "{{ ml_analytics_slack_channel | default('') }}" ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" From c8dd939c932f8989e7874f4d70601ca85d944111 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 14 Dec 2022 14:32:50 +0530 Subject: [PATCH 138/203] LR-110 removed un-necessary prefix --- .../sunbird-RC/registry/schemas/TrainingCertificate.json | 2 
+- utils/sunbird-RC/schema/credential_template.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json index 5187b08e81..84dc1d5429 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json @@ -69,6 +69,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{ upstream_url }}/schema/credential_template.json" + "credentialTemplate": "{{ upstream_url }}/schema/credential_template.json" } } diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index f96a3c0528..60477810b0 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,7 +1,7 @@ { "@context": [ - "https://{{ upstream_url }}/schema/v1_context.json", - "https://{{ upstream_url }}/schema/sunbird_context.json" + "{{ upstream_url }}/schema/v1_context.json", + "{{ upstream_url }}/schema/sunbird_context.json" ], "type": [ "VerifiableCredential" From 9c6b12f8cdb789744ef74c762899cc6130053450 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:03:51 +0530 Subject: [PATCH 139/203] Release 5.1.0 - csp changes (#3667) --- ansible/assets-upload.yml | 1 - ansible/deploy-plugins.yml | 1 - .../tasks/delete-using-azcopy.yml | 12 +++++++++++- .../tasks/upload-using-azcopy.yml | 12 +++++++++++- ansible/roles/cassandra-backup/tasks/main.yml | 2 +- .../postgres-managed-service-restore/tasks/main.yml | 2 +- 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 2d8d4b1bc2..09e7df6ceb 100644 --- 
a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -13,7 +13,6 @@ blob_container_folder_path: "" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - name: delete files and folders from azure storage using azcopy include_role: diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 6d048b18c4..a78ce1c640 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -19,7 +19,6 @@ container_public_access: "container" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always no_log: True diff --git a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml index 236169e86c..196de9c9b3 100644 --- a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml @@ -1,6 +1,16 @@ --- +- name: generate SAS token for azcopy + shell: | + sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'` + sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv` + echo $sas_token + register: sas_token + +- set_fact: + container_sas_token: "{{ sas_token.stdout}}" + - name: delete files and folders from azure storage using azcopy - shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' 
--recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index affbc8c002..95da584c9b 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -1,4 +1,14 @@ --- +- name: generate SAS token for azcopy + shell: | + sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'` + sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv` + echo $sas_token + register: sas_token + +- set_fact: + container_sas_token: "{{ sas_token.stdout}}" + - name: create container in azure storage if it doesn't exist include_role: name: azure-cloud-storage @@ -6,7 +16,7 @@ when: create_container == True - name: upload files and folders to azure storage using azcopy - shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' --recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index ce0e646662..0e5ae87477 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -42,7 +42,7 @@ blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - 
storage_account_sas_token: "{{ azure_management_storage_account_sas }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload backup to S3 diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index c3d518db56..58d2c53482 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -26,7 +26,7 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ cloud_storage_management_bucketname }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" aws_access_key_id: "{{ cloud_management_storage_accountname }}" aws_secret_access_key: "{{ cloud_management_storage_secret }}" aws_default_region: "{{ cloud_public_storage_region }}" From 061a993f8c85635eaad2388090aa5725b0cea239 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:26:06 +0530 Subject: [PATCH 140/203] Add logging level configuration for registry service (#3669) Add logging level configuration for registry service --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index c582ae8941..433edbfc92 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -56,6 +56,7 @@ rccoreenv: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} + logging.level.root : {{ 
registry_logging_level|default('INFO')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From fadcdc00c7c0d2199d89b84df21076d7994d4c7b Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 14 Dec 2022 16:51:27 +0530 Subject: [PATCH 141/203] LR-110 added default context --- utils/sunbird-RC/schema/credential_template.json | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index 60477810b0..a999eca8da 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,5 +1,6 @@ { "@context": [ + "https://www.w3.org/2018/credentials/v1", "{{ upstream_url }}/schema/v1_context.json", "{{ upstream_url }}/schema/sunbird_context.json" ], From 95a83a1a3c3000dbeb4d823370b057e7c01bf2e1 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 15 Dec 2022 15:53:08 +0530 Subject: [PATCH 142/203] Update config.j2 (#3673) --- .../ml-analytics-service/defaults/main.yml | 12 ++++++------ .../ml-analytics-service/templates/config.j2 | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 33d37d02a6..65274b1182 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -105,12 +105,12 @@ ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "{{ cloud_storage_url }}/{{ cloud_storage_samiksha_bucketname }}" ml_Cloud_secret_json_file: "cloud_secrets.json" ml_Cloud_Secrets: - account_name: "{{ cloud_private_storage_accountname }}" - account_key: "{{ cloud_private_storage_secret }}" -cloud_private_storage_accountname: "{{ cloud_private_storage_accountname }}" + account_name: "{{ cloud_public_storage_accountname }}" 
+ account_key: "{{ cloud_public_storage_secret }}" +cloud_public_storage_accountname: "{{ cloud_public_storage_accountname }}" cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" -cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" -cloud_private_storage_region: "{{ cloud_private_storage_region }}" -cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" +cloud_public_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_public_storage_region: "{{ cloud_public_storage_region }}" +cloud_public_storage_endpoint: "{{ cloud_public_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index c0ec68fd3e..52927ec957 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -154,13 +154,13 @@ survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log [ORACLE] -endpoint_url = {{ cloud_private_storage_endpoint }} +endpoint_url = {{ cloud_public_storage_endpoint }} -access_key = {{ cloud_private_storage_accountname }} +access_key = {{ cloud_public_storage_accountname }} -secret_access_key = {{ cloud_private_storage_secret }} +secret_access_key = {{ cloud_public_storage_secret }} -region_name = {{ cloud_private_storage_region }} +region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} @@ -178,11 +178,11 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} service_name = S3 -access_key = {{ cloud_private_storage_accountname }} +access_key = {{ cloud_public_storage_accountname }} -secret_access_key = {{ cloud_private_storage_secret }} +secret_access_key = {{ cloud_public_storage_secret }} -region_name = {{ 
cloud_private_storage_region }} +region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} @@ -190,11 +190,11 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} [AZURE] -account_name = {{ cloud_private_storage_accountname }} +account_name = {{ cloud_public_storage_accountname }} container_name = {{ cloud_storage_telemetry_bucketname }} -account_key = {{ cloud_private_storage_secret }} +account_key = {{ cloud_public_storage_secret }} {% endif %} From a807916a3952fcbcced5141da58e42b5e2444b14 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 15 Dec 2022 20:54:11 +0530 Subject: [PATCH 143/203] fix: ED-573 jenkins job to kill spark jobs Signed-off-by: Keshav Prasad --- ansible/kill_spark_jobs.yaml | 12 ++++++ pipelines/ops/kill-spark-jobs/Jenkinsfile | 51 +++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 ansible/kill_spark_jobs.yaml create mode 100644 pipelines/ops/kill-spark-jobs/Jenkinsfile diff --git a/ansible/kill_spark_jobs.yaml b/ansible/kill_spark_jobs.yaml new file mode 100644 index 0000000000..01c01c5bcf --- /dev/null +++ b/ansible/kill_spark_jobs.yaml @@ -0,0 +1,12 @@ +--- +- hosts: spark + become: yes + tasks: + - name: get pids of job manager which may be orphaned + shell: ps -ef | grep [j]ob. 
| awk '{print $2}' + register: pids_of_jobmanager + + - name: kill the orphan job manager pids + shell: "kill -9 {{ item | int }}" + with_items: + - "{{ pids_of_jobmanager.stdout_lines }}" diff --git a/pipelines/ops/kill-spark-jobs/Jenkinsfile b/pipelines/ops/kill-spark-jobs/Jenkinsfile new file mode 100644 index 0000000000..37bad74c46 --- /dev/null +++ b/pipelines/ops/kill-spark-jobs/Jenkinsfile @@ -0,0 +1,51 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/kill_spark_jobs.yaml" + ansibleExtraArgs = "-v" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + summary() + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From ece7ed3685bab5395e4333927c9fdff1282fbc27 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Fri, 16 Dec 2022 11:30:33 +0530 Subject: [PATCH 
144/203] Update ingestion specs (#3677) --- ansible/roles/ml-analytics-service/defaults/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 65274b1182..d73099451d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -53,12 +53,12 @@ ml_analytics_project_output_dir: "{{ WORKDIR }}/source/projects/output" ml_analytics_observation_status_output_dir: "{{ WORKDIR }}/source/observations/status/output" ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_auth_token')}}" ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" -ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id",
"user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' +ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code","isRubricDriven","criteriaLevelReport"]},"metricsSpec":[]}}}' 
ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"m
etricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"},{"type":"long","name":"no_of_certificate_issued"}]},"metricsSpec":[]}}}' ml_analytics_obs_distinctCnt_cloud_blob_path: "observation/distinctCount/" ml_analytics_obs_distinctCnt_domain_cloud_blob_path: "observation/distinctCount_domain/" ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path: "observation/distinctCount_domain_criteria/" @@ -77,7 +77,7 @@ ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observ ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"},{"type":"long","name":"no_of_certificate_issued"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" From 6b868b4c347913291b98b4dd48b3af574e7d57a6 Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Tue, 20 Dec 2022 10:58:13 +0530 Subject: [PATCH 145/203] updated jenkins version and plugins (#3679) --- deploy/jenkins/jenkins-plugins-setup.sh | 8 ++++---- deploy/jenkins/jenkins-server-setup.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/jenkins/jenkins-plugins-setup.sh b/deploy/jenkins/jenkins-plugins-setup.sh index ed9a8c1756..c4256b3620 100755 --- a/deploy/jenkins/jenkins-plugins-setup.sh +++ 
b/deploy/jenkins/jenkins-plugins-setup.sh @@ -17,13 +17,13 @@ fi echo -e "\n\e[0;32m${bold}Downloading and copying jenkins plugin directory to Jenkins ${normal}" if [[ ! -d /var/lib/jenkins/plugins ]]; then -wget https://sunbirdpublic.blob.core.windows.net/installation/plugins.tar -tar -xf plugins.tar +wget https://sunbirdpublic.blob.core.windows.net/installation/plugins-2-319-3.tar +tar -xf plugins-2-319-3.tar mv plugins /var/lib/jenkins/ chown -R jenkins:jenkins /var/lib/jenkins/plugins else -wget https://sunbirdpublic.blob.core.windows.net/installation/plugins.tar -tar -xf plugins.tar +wget https://sunbirdpublic.blob.core.windows.net/installation/plugins-2-319-3.tar +tar -xf plugins-2-319-3.tar cp -rf plugins/* /var/lib/jenkins/plugins/ chown -R jenkins:jenkins /var/lib/jenkins/plugins fi diff --git a/deploy/jenkins/jenkins-server-setup.sh b/deploy/jenkins/jenkins-server-setup.sh index ad2b361671..f2ef322969 100755 --- a/deploy/jenkins/jenkins-server-setup.sh +++ b/deploy/jenkins/jenkins-server-setup.sh @@ -15,7 +15,7 @@ echo -e "\n\e[0;32m${bold}Installating Jenkins${normal}" wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | apt-key add - apt-add-repository "deb https://pkg.jenkins.io/debian-stable binary/" apt-get update -apt-get install -y jenkins=2.277.4 +apt-get install -y jenkins=2.319.3 echo -e "\n\e[0;32m${bold}Installating PIP${normal}" apt-get install -y python-pip From b61a35fad0362ea7eb0bb688ff0bc12ffc811571 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 20 Dec 2022 10:59:31 +0530 Subject: [PATCH 146/203] Issue #ED-592 fix: Pdata version updated (#3678) --- .../artifacts/sunbird/login/resources/js/telemetry_service.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js b/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js index e1f88cc741..f9250fd425 100644 --- a/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js 
+++ b/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js @@ -2506,7 +2506,7 @@ if(client_id.toLowerCase() === 'android'){ "telemetry": { "pdata": { "id": pdataId, - "ver": "5.0.0", + "ver": "5.1.0", "pid": "sunbird-portal" } } @@ -2687,7 +2687,6 @@ if(client_id.toLowerCase() === 'android'){ function stringToHTML(str) { let parser = new DOMParser(); let doc = parser.parseFromString(str, 'text/html'); - console.log('Doc parse => ', doc); // TODO: log! return doc?.body?.innerText || document.createElement('body'); } From 3e1b3207fed9a61257b17f8cf13e41157149b162 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 14:49:57 +0530 Subject: [PATCH 147/203] csp migration variables update --- .../helm_charts/core/analytics/templates/deployment.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 57198cb77b..b0d7aad44b 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -35,13 +35,13 @@ spec: value: {{ .Values.env.min_heap | quote }} - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - - name: cloud_storage_secret + - name: azure_storage_secret value: {{ .Values.env.cloud_private_account_secret | quote }} - - name: cloud_storage_key + - name: azure_storage_key value: {{ .Values.env.cloud_private_account_name | quote }} - - name: public_cloud_storage_secret + - name: public_azure_storage_secret value: {{ .Values.env.cloud_public_account_secret | quote }} - - name: public_cloud_storage_key + - name: public_azure_storage_key value: {{ .Values.env.cloud_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true From 56ea8f9b418ebe6ee2c566fe7959eb02e1bc3ad2 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 14:51:00 +0530 Subject: [PATCH 
148/203] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index f86925ad5c..354dcab3d3 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - cloud_private_account_secret: {{ sunbird_private_storage_account_key }} - cloud_private_account_name: {{ sunbird_private_storage_account_name }} - cloud_public_account_secret: {{ sunbird_public_storage_account_key }} - cloud_public_account_name: {{ sunbird_public_storage_account_name }} + azure_private_account_secret: {{ sunbird_private_storage_account_key }} + azure_private_account_name: {{ sunbird_private_storage_account_name }} + azure_public_account_secret: {{ sunbird_public_storage_account_key }} + azure_public_account_name: {{ sunbird_public_storage_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 94ec16350315841289c39bca6f703e3f8f6c4d1a Mon Sep 17 00:00:00 2001 From: Santhosh Gandham Date: Thu, 22 Dec 2022 14:54:09 +0530 Subject: [PATCH 149/203] Updated template value for dial ansible group (#3681) --- private_repo/ansible/inventory/dev/KnowledgePlatform/hosts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index e735ac4c01..c144bc6fa2 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -17,7 +17,7 @@ learning1 redis1 [dial1] -18.3.1.5 
+10.0.1.5 [dial:children] dial1 From 6c6eebff0ea49dbf0227d5d4e42f052a2105beba Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 15:29:57 +0530 Subject: [PATCH 150/203] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..0a849d99b9 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ sunbird_public_storage_account_name }} + azure_private_account_secret: {{ cloud_private_account_secret }} + azure_private_account_name: {{ cloud_private_account_name }} + azure_public_account_secret: {{ cloud_public_account_secret }} + azure_public_account_name: {{ cloud_public_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From afe31e6358d0839145b7d320c0259a6204be7f01 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 15:31:16 +0530 Subject: [PATCH 151/203] csp migration variables update --- .../helm_charts/core/analytics/templates/deployment.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index b0d7aad44b..0926360f76 100644 --- 
a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -36,13 +36,13 @@ spec: - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - name: azure_storage_secret - value: {{ .Values.env.cloud_private_account_secret | quote }} + value: {{ .Values.env.azure_private_account_secret | quote }} - name: azure_storage_key - value: {{ .Values.env.cloud_private_account_name | quote }} + value: {{ .Values.env.azure_private_account_name | quote }} - name: public_azure_storage_secret - value: {{ .Values.env.cloud_public_account_secret | quote }} + value: {{ .Values.env.azure_public_account_secret | quote }} - name: public_azure_storage_key - value: {{ .Values.env.cloud_public_account_name | quote }} + value: {{ .Values.env.azure_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true envFrom: From c7db90c106a7f82540d1cf211d42e419d6d10a75 Mon Sep 17 00:00:00 2001 From: Sadanand <100120230+SadanandGowda@users.noreply.github.com> Date: Thu, 22 Dec 2022 16:41:15 +0530 Subject: [PATCH 152/203] csp migration variables update (#3671) --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..0a849d99b9 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ 
sunbird_public_storage_account_name }} + azure_private_account_secret: {{ cloud_private_account_secret }} + azure_private_account_name: {{ cloud_private_account_name }} + azure_public_account_secret: {{ cloud_public_account_secret }} + azure_public_account_name: {{ cloud_public_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 1195db50267581f35163107d7b6c5bea1433eed2 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 16:52:39 +0530 Subject: [PATCH 153/203] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 0a849d99b9..c3cd3b46f7 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ cloud_private_account_secret }} - azure_private_account_name: {{ cloud_private_account_name }} - azure_public_account_secret: {{ cloud_public_account_secret }} - azure_public_account_name: {{ cloud_public_account_name }} + azure_private_account_secret: {{ cloud_private_storage_secret }} + azure_private_account_name: {{ cloud_private_storage_accountname }} + azure_public_account_secret: {{ cloud_public_storage_secret }} + azure_public_account_name: {{ cloud_public_storage_accountname }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 35e9deae2ad28fa4334652b079c19588098f2cbc Mon Sep 17 00:00:00 2001 From: NIKHIL VARMA M <63706239+nikhilvarma940@users.noreply.github.com> Date: Fri, 23 Dec 2022 16:55:48 
+0530 Subject: [PATCH 154/203] Ansible-postgres-patroni cluster (#3684) * Ansible-postgres-patroni cluster * README.md updated --- ansible/postgresql-patroni-cluster.yaml | 14 + ansible/roles/ansible-etcd/README.md | 100 +++++ ansible/roles/ansible-etcd/defaults/main.yml | 14 + ansible/roles/ansible-etcd/handlers/main.yml | 12 + ansible/roles/ansible-etcd/meta/main.yml | 52 +++ ansible/roles/ansible-etcd/tasks/main.yml | 19 + ansible/roles/ansible-etcd/templates/etcd.j2 | 403 ++++++++++++++++++ ansible/roles/ansible-etcd/vars/main.yml | 2 + ansible/roles/ansible-haproxy/README.md | 100 +++++ .../roles/ansible-haproxy/defaults/main.yml | 2 + .../roles/ansible-haproxy/handlers/main.yml | 12 + ansible/roles/ansible-haproxy/meta/main.yml | 52 +++ ansible/roles/ansible-haproxy/tasks/main.yml | 19 + .../ansible-haproxy/templates/haproxy.cfg.j2 | 26 ++ ansible/roles/ansible-haproxy/vars/main.yml | 2 + .../roles/ansible-postgres_patroni/README.md | 100 +++++ .../defaults/main.yml | 16 + .../handlers/main.yml | 18 + .../ansible-postgres_patroni/meta/main.yml | 52 +++ .../ansible-postgres_patroni/tasks/main.yml | 105 +++++ .../templates/patroni.service.j2 | 17 + .../templates/patroni.yaml.j2 | 58 +++ .../ansible-postgres_patroni/vars/main.yml | 2 + 23 files changed, 1197 insertions(+) create mode 100644 ansible/postgresql-patroni-cluster.yaml create mode 100644 ansible/roles/ansible-etcd/README.md create mode 100644 ansible/roles/ansible-etcd/defaults/main.yml create mode 100644 ansible/roles/ansible-etcd/handlers/main.yml create mode 100644 ansible/roles/ansible-etcd/meta/main.yml create mode 100644 ansible/roles/ansible-etcd/tasks/main.yml create mode 100644 ansible/roles/ansible-etcd/templates/etcd.j2 create mode 100644 ansible/roles/ansible-etcd/vars/main.yml create mode 100644 ansible/roles/ansible-haproxy/README.md create mode 100644 ansible/roles/ansible-haproxy/defaults/main.yml create mode 100644 ansible/roles/ansible-haproxy/handlers/main.yml create mode 100644 
ansible/roles/ansible-haproxy/meta/main.yml create mode 100644 ansible/roles/ansible-haproxy/tasks/main.yml create mode 100644 ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 create mode 100644 ansible/roles/ansible-haproxy/vars/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/README.md create mode 100644 ansible/roles/ansible-postgres_patroni/defaults/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/handlers/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/meta/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/tasks/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 create mode 100644 ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 create mode 100644 ansible/roles/ansible-postgres_patroni/vars/main.yml diff --git a/ansible/postgresql-patroni-cluster.yaml b/ansible/postgresql-patroni-cluster.yaml new file mode 100644 index 0000000000..7fcae245a4 --- /dev/null +++ b/ansible/postgresql-patroni-cluster.yaml @@ -0,0 +1,14 @@ +- hosts: etcd + become: yes + roles: + - ansible-etcd + +- hosts: postgresql + become: yes + roles: + - ansible-postgres_patroni + +- hosts: haproxy + become: yes + roles: + - ansible-haproxy diff --git a/ansible/roles/ansible-etcd/README.md b/ansible/roles/ansible-etcd/README.md new file mode 100644 index 0000000000..15c49e336a --- /dev/null +++ b/ansible/roles/ansible-etcd/README.md @@ -0,0 +1,100 @@ +Role Name +========= +``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. 
postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres # Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. 
Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-etcd/defaults/main.yml b/ansible/roles/ansible-etcd/defaults/main.yml new file mode 100644 index 0000000000..0478f26652 --- /dev/null +++ b/ansible/roles/ansible-etcd/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# defaults file for ansible-etcd + + +# etcd cluster variables +postgres_patroni_etcd_name: "postgres-etcd" +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_initial_cluster_state: "postgres" 
+postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" +#etcd_data_dir: \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/handlers/main.yml b/ansible/roles/ansible-etcd/handlers/main.yml new file mode 100644 index 0000000000..33d54607a2 --- /dev/null +++ b/ansible/roles/ansible-etcd/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for ansible-etcd +- name: Restart etcd systemd + systemd: + name: etcd.service + state: restarted + daemon_reload: yes + +- name: Restart etcd service + systemd: + name: etcd.service + state: restarted \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/meta/main.yml b/ansible/roles/ansible-etcd/meta/main.yml new file mode 100644 index 0000000000..6b5d1fd295 --- /dev/null +++ b/ansible/roles/ansible-etcd/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible-etcd for distributed key store for postgresql cluster +# company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/tasks/main.yml b/ansible/roles/ansible-etcd/tasks/main.yml new file mode 100644 index 0000000000..5e6d329bc1 --- /dev/null +++ b/ansible/roles/ansible-etcd/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# tasks file for ansible-etcd +- name: Install etcd after updating apt + apt: + name: + - etcd + state: present + update_cache: yes + tags: + - etcd Install + +- name: Template configuration file to etcd + template: + src: etcd.j2 + dest: '/etc/default/etcd' + notify: + - Restart etcd service + tags: + - Restart etcd \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/templates/etcd.j2 b/ansible/roles/ansible-etcd/templates/etcd.j2 new file mode 100644 index 0000000000..09e6ad6450 --- /dev/null +++ b/ansible/roles/ansible-etcd/templates/etcd.j2 @@ -0,0 +1,403 @@ +## etcd(1) daemon options +## See "/usr/share/doc/etcd-server/op-guide/configuration.md.gz" + +### Member flags + +##### --name +## Human-readable name for this member. 
+## This value is referenced as this node's own entries listed in the +## `--initial-cluster` flag (e.g., `default=http://localhost:2380`). This +## needs to match the key used in the flag if using static bootstrapping. When +## using discovery, each member must have a unique name. `Hostname` or +## `machine-id` can be a good choice. +## default: "default" +#ETCD_NAME="postgres-etcd" + +##### --data-dir +## Path to the data directory. +## default: "${name}.etcd" +# ETCD_DATA_DIR="/var/lib/etcd/default" + +##### --wal-dir +## Path to the dedicated wal directory. If this flag is set, etcd will write +## the WAL files to the walDir rather than the dataDir. This allows a +## dedicated disk to be used, and helps avoid io competition between logging +## and other IO operations. +## default: "" +# ETCD_WAL_DIR + +##### --snapshot-count +## Number of committed transactions to trigger a snapshot to disk. +## default: "100000" +# ETCD_SNAPSHOT_COUNT="100000" + +##### --heartbeat-interval +## Time (in milliseconds) of a heartbeat interval. +## default: "100" +# ETCD_HEARTBEAT_INTERVAL="100" + +##### --election-timeout +## Time (in milliseconds) for an election to timeout. See +## /usr/share/doc/etcd-server/tuning.md.gz for details. +## default: "1000" +# ETCD_ELECTION_TIMEOUT="1000" + +##### --listen-peer-urls +## List of URLs to listen on for peer traffic. This flag tells the etcd to +## accept incoming requests from its peers on the specified scheme://IP:port +## combinations. Scheme can be either http or https.If 0.0.0.0 is specified as +## the IP, etcd listens to the given port on all interfaces. If an IP address is +## given as well as a port, etcd will listen on the given port and interface. +## Multiple URLs may be used to specify a number of addresses and ports to listen +## on. The etcd will respond to requests from any of the listed addresses and +## ports. 
+## default: "http://localhost:2380" +## example: "http://10.0.0.1:2380" +## invalid example: "http://example.com:2380" (domain name is invalid for binding) +#ETCD_LISTEN_PEER_URLS="http://172.51.1.29:2380" + +##### --listen-client-urls +## List of URLs to listen on for client traffic. This flag tells the etcd to +## accept incoming requests from the clients on the specified scheme://IP:port +## combinations. Scheme can be either http or https. If 0.0.0.0 is specified as +## the IP, etcd listens to the given port on all interfaces. If an IP address is +## given as well as a port, etcd will listen on the given port and interface. +## Multiple URLs may be used to specify a number of addresses and ports to listen +## on. The etcd will respond to requests from any of the listed addresses and +## ports. +## default: "http://localhost:2379" +## example: "http://10.0.0.1:2379" +## invalid example: "http://example.com:2379" (domain name is invalid for binding) +#ETCD_LISTEN_CLIENT_URLS="http://172.51.1.29:2379,http://127.0.0.1:2379" + +##### --max-snapshots +## Maximum number of snapshot files to retain (0 is unlimited) +## The default for users on Windows is unlimited, and manual purging down to 5 +## (or some preference for safety) is recommended. +## default: 5 +# ETCD_MAX_SNAPSHOTS="5" + +##### --max-wals +## Maximum number of wal files to retain (0 is unlimited) +## The default for users on Windows is unlimited, and manual purging down to 5 +## (or some preference for safety) is recommended. +## default: 5 +# ETCD_MAX_WALS="5" + +##### --cors +## Comma-separated white list of origins for CORS (cross-origin resource +## sharing). +## default: none +# ETCD_CORS + +#### --quota-backend-bytes +## Raise alarms when backend size exceeds the given quota (0 defaults to low +## space quota). +## default: 0 +# ETCD_QUOTA_BACKEND_BYTES="0" + +#### --backend-batch-limit +## BackendBatchLimit is the maximum operations before commit the backend +## transaction. 
+## default: 0 +# ETCD_BACKEND_BATCH_LIMIT="0" + +#### --backend-batch-interval +## BackendBatchInterval is the maximum time before commit the backend +## transaction. +## default: 0 +# ETCD_BACKEND_BATCH_INTERVAL="0" + +#### --max-txn-ops +## Maximum number of operations permitted in a transaction. +## default: 128 +# ETCD_MAX_TXN_OPS="128" + +#### --max-request-bytes +## Maximum client request size in bytes the server will accept. +## default: 1572864 +# ETCD_MAX_REQUEST_BYTES="1572864" + +#### --grpc-keepalive-min-time +## Minimum duration interval that a client should wait before pinging server. +## default: 5s +# ETCD_GRPC_KEEPALIVE_MIN_TIME="5" + +#### --grpc-keepalive-interval +## Frequency duration of server-to-client ping to check if a connection is +## alive (0 to disable). +## default: 2h +# ETCD_GRPC_KEEPALIVE_INTERVAL="2h" + +#### --grpc-keepalive-timeout +## Additional duration of wait before closing a non-responsive connection +## (0 to disable). +## default: 20s +# ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" + + +### Clustering flags + +# `--initial` prefix flags are used in bootstrapping (static bootstrap, +# discovery-service bootstrap or runtime reconfiguration) a new member, and +# ignored when restarting an existing member. + +# `--discovery` prefix flags need to be set when using discovery service. + +##### --initial-advertise-peer-urls + +## List of this member's peer URLs to advertise to the rest of the cluster. +## These addresses are used for communicating etcd data around the cluster. At +## least one must be routable to all cluster members. These URLs can contain +## domain names. +## default: "http://localhost:2380" +## example: "http://example.com:2380, http://10.0.0.1:2380" +#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://172.51.1.29:2380" + +##### --initial-cluster +## Initial cluster configuration for bootstrapping. +## The key is the value of the `--name` flag for each node provided. 
The +## default uses `default` for the key because this is the default for the +## `--name` flag. +## default: "default=http://localhost:2380" +#ETCD_INITIAL_CLUSTER="postgres-etcd=http://172.51.1.29:2380" + +##### --initial-cluster-state +## Initial cluster state ("new" or "existing"). Set to `new` for all members +## present during initial static or DNS bootstrapping. If this option is set to +## `existing`, etcd will attempt to join the existing cluster. If the wrong value +## is set, etcd will attempt to start but fail safely. +## default: "new" +# ETCD_INITIAL_CLUSTER_STATE="new" + +##### --initial-cluster-token +## Initial cluster token for the etcd cluster during bootstrap. +## default: "etcd-cluster" +#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" + +##### --advertise-client-urls +## List of this member's client URLs to advertise to the rest of the cluster. +## These URLs can contain domain names. +## Be careful if advertising URLs such as http://localhost:2379 from a cluster +## member and are using the proxy feature of etcd. This will cause loops, because +## the proxy will be forwarding requests to itself until its resources (memory, +## file descriptors) are eventually depleted. +## default: "http://localhost:2379" +## example: "http://example.com:2379, http://10.0.0.1:2379" +#ETCD_ADVERTISE_CLIENT_URLS="http://172.51.1.29:2379" + +##### --discovery +## Discovery URL used to bootstrap the cluster. +## default: none +# ETCD_DISCOVERY + +##### --discovery-srv +## DNS srv domain used to bootstrap the cluster. +## default: none +# ETCD_DISCOVERY_SRV + +##### --discovery-fallback +## Expected behavior ("exit" or "proxy") when discovery services fails. "proxy" +## supports v2 API only. +## default: "proxy" +# ETCD_DISCOVERY_FALLBACK="proxy" + +##### --discovery-proxy +## HTTP proxy to use for traffic to discovery service. +## default: none +# ETCD_DISCOVERY_PROXY + +##### --strict-reconfig-check +## Reject reconfiguration requests that would cause quorum loss. 
+## default: false +# ETCD_STRICT_RECONFIG_CHECK + +##### --auto-compaction-retention +## Auto compaction retention for mvcc key value store in hour. 0 means disable +## auto compaction. +## default: 0 +# ETCD_AUTO_COMPACTION_RETENTION="0" + +##### --enable-v2 +## Accept etcd V2 client requests +## default: true +# ETCD_ENABLE_V2="true" + + +### Proxy flags + +# `--proxy` prefix flags configures etcd to run in proxy mode. "proxy" supports +# v2 API only. + +##### --proxy +## Proxy mode setting ("off", "readonly" or "on"). +## default: "off" +# ETCD_PROXY="off" + +##### --proxy-failure-wait +## Time (in milliseconds) an endpoint will be held in a failed state before +## being reconsidered for proxied requests. +## default: 5000 +# ETCD_PROXY_FAILURE_WAIT="5000" + +##### --proxy-refresh-interval +## Time (in milliseconds) of the endpoints refresh interval. +## default: 30000 +# ETCD_PROXY_REFRESH_INTERVAL="30000" + +##### --proxy-dial-timeout +## Time (in milliseconds) for a dial to timeout or 0 to disable the timeout +## default: 1000 +# ETCD_PROXY_DIAL_TIMEOUT="1000" + +##### --proxy-write-timeout +## Time (in milliseconds) for a write to timeout or 0 to disable the timeout. +## default: 5000 +# ETCD_PROXY_WRITE_TIMEOUT="5000" + +##### --proxy-read-timeout +## Time (in milliseconds) for a read to timeout or 0 to disable the timeout. +## Don't change this value if using watches because use long polling requests. +## default: 0 +# ETCD_PROXY_READ_TIMEOUT="0" + + +### Security flags + +# The security flags help to build a secure etcd cluster. + +##### --ca-file (**DEPRECATED**) +## Path to the client server TLS CA file. `--ca-file ca.crt` could be replaced +## by `--trusted-ca-file ca.crt --client-cert-auth` and etcd will perform the +## same. +## default: none +# ETCD_CA_FILE + +##### --cert-file +## Path to the client server TLS cert file. +## default: none +# ETCD_CERT_FILE + +##### --key-file +## Path to the client server TLS key file. 
+## default: none +# ETCD_KEY_FILE + +##### --client-cert-auth +## Enable client cert authentication. +## CN authentication is not supported by gRPC-gateway. +## default: false +# ETCD_CLIENT_CERT_AUTH + +#### --client-crl-file +## Path to the client certificate revocation list file. +## default: "" +# ETCD_CLIENT_CRL_FILE + +##### --trusted-ca-file +## Path to the client server TLS trusted CA key file. +## default: none +# ETCD_TRUSTED_CA_FILE + +##### --auto-tls +## Client TLS using generated certificates +## default: false +# ETCD_AUTO_TLS + +##### --peer-ca-file (**DEPRECATED**) +## Path to the peer server TLS CA file. `--peer-ca-file ca.crt` could be +## replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth` and etcd +## will perform the same. +## default: none +# ETCD_PEER_CA_FILE + +##### --peer-cert-file +## Path to the peer server TLS cert file. +## default: none +# ETCD_PEER_CERT_FILE + +##### --peer-key-file +## Path to the peer server TLS key file. +## default: none +# ETCD_PEER_KEY_FILE + +##### --peer-client-cert-auth +## Enable peer client cert authentication. +## default: false +# ETCD_PEER_CLIENT_CERT_AUTH + +#### --peer-crl-file +## Path to the peer certificate revocation list file. +## default: "" +# ETCD_PEER_CRL_FILE + +##### --peer-trusted-ca-file +## Path to the peer server TLS trusted CA file. +## default: none +# ETCD_PEER_TRUSTED_CA_FILE + +##### --peer-auto-tls +## Peer TLS using generated certificates +## default: false +# ETCD_PEER_AUTO_TLS + +#### --peer-cert-allowed-cn +## Allowed CommonName for inter peer authentication. +## default: none +# ETCD_PEER_CERT_ALLOWED_CN + +#### --cipher-suites +## Comma-separated list of supported TLS cipher suites between server/client and +## peers. +## default: "" +# ETCD_CIPHER_SUITES + +#### --experimental-peer-skip-client-san-verification +## Skip verification of SAN field in client certificate for peer connections. 
+## default: false +#+ ETCD_EXPERIMENTAL_PEER_SKIP_CLIENT_SAN_VERIFICATION + + +### Logging flags + +#### --log-outputs +## Specify 'stdout' or 'stderr' to skip journald logging even when running +## under systemd, or list of comma separated output targets. +## default: default +# ETCD_LOG_OUTPUTS + +##### --debug +## Drop the default log level to DEBUG for all subpackages. +## default: false (INFO for all packages) +# ETCD_DEBUG + +##### --log-package-levels +## Set individual etcd subpackages to specific log levels. An example being +## `etcdserver=WARNING,security=DEBUG` +## default: none (INFO for all packages) +# ETCD_LOG_PACKAGE_LEVELS + + +### Unsafe flags + +# Please be CAUTIOUS when using unsafe flags because it will break the guarantees given by the consensus protocol. +# For example, it may panic if other members in the cluster are still alive. +# Follow the instructions when using these flags. + +##### --force-new-cluster +## Force to create a new one-member cluster. It commits configuration changes +## forcing to remove all existing members in the cluster and add itself. It needs +## to be set to restore a backup. 
+## default: false +# ETCD_FORCE_NEW_CLUSTER +# +# +ETCD_INITIAL_CLUSTER="{{ postgres_patroni_etcd_initial_cluster }}" +ETCD_INITIAL_CLUSTER_STATE="{{ postgres_patroni_etcd_initial_cluster_state }}" +ETCD_INITIAL_CLUSTER_TOKEN="{{ postgres_patroni_etcd_initial_cluster_token }}" +ETCD_INITIAL_ADVERTISE_PEER_URLS="{{ postgres_patroni_etcd_initial_advertise_peer_urls }}" +#ETCD_DATA_DIR="/var/etcd" +ETCD_LISTEN_PEER_URLS="{{ postgres_patroni_etcd_listen_peer_urls }}" +ETCD_LISTEN_CLIENT_URLS="{{ postgres_patroni_etcd_listen_client_urls }}" +ETCD_ADVERTISE_CLIENT_URLS="{{ postgres_patroni_etcd_advertise_client_urls }}" +ETCD_NAME="{{ postgres_patroni_etcd_name }}" \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/vars/main.yml b/ansible/roles/ansible-etcd/vars/main.yml new file mode 100644 index 0000000000..411544ecd6 --- /dev/null +++ b/ansible/roles/ansible-etcd/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-etcd \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/README.md b/ansible/roles/ansible-haproxy/README.md new file mode 100644 index 0000000000..15c49e336a --- /dev/null +++ b/ansible/roles/ansible-haproxy/README.md @@ -0,0 +1,100 @@ +Role Name +========= +``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. 
postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres # Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. 
Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-haproxy/defaults/main.yml b/ansible/roles/ansible-haproxy/defaults/main.yml new file mode 100644 index 0000000000..2b616ee2c8 --- /dev/null +++ b/ansible/roles/ansible-haproxy/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ansible-haproxy diff --git a/ansible/roles/ansible-haproxy/handlers/main.yml b/ansible/roles/ansible-haproxy/handlers/main.yml new file mode 100644 index 0000000000..0f55472f28 --- /dev/null +++ 
b/ansible/roles/ansible-haproxy/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for ansible-haproxy +- name: Restart haproxy systemd + systemd: + name: haproxy.service + state: restarted + daemon_reload: yes + +- name: Restart haproxy service + systemd: + name: haproxy.service + state: restarted \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/meta/main.yml b/ansible/roles/ansible-haproxy/meta/main.yml new file mode 100644 index 0000000000..bb6de485a4 --- /dev/null +++ b/ansible/roles/ansible-haproxy/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible HAProxy for postgresql cluster + #company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. 
+ # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/roles/ansible-haproxy/tasks/main.yml b/ansible/roles/ansible-haproxy/tasks/main.yml new file mode 100644 index 0000000000..2c01238c57 --- /dev/null +++ b/ansible/roles/ansible-haproxy/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# tasks file for ansible-haproxy +- name: Install HaProxy after updating apt + apt: + name: + - haproxy + state: present + update_cache: yes + tags: + - HaProxy Install + +- name: Template configuration file to haproxy.cfg + template: + src: haproxy.cfg.j2 + dest: '/etc/haproxy/haproxy.cfg' + notify: + - Restart haproxy service + tags: + - Restart haproxy \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 b/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 0000000000..0a85d2b27b --- /dev/null +++ b/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,26 @@ +global + maxconn 100 + +defaults + log global + mode tcp + retries 2 + timeout client 30m + timeout connect 4s + timeout server 30m + timeout check 5s + +listen stats + mode http + bind *:7000 + stats enable + stats uri / + +listen postgres + bind *:5000 + option httpchk + http-check expect status 200 + default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions + server postgresql_{{ postgresql_1 }}_5432 {{ postgresql_1 }}:5432 maxconn 100 check port 8008 + server postgresql_{{ postgresql_2 }}_5432 {{ postgresql_2 }}:5432 maxconn 100 check port 8008 + server postgresql_{{ postgresql_3 }}_5432 {{ postgresql_3 }}:5432 maxconn 100 check port 8008 \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/vars/main.yml b/ansible/roles/ansible-haproxy/vars/main.yml new file mode 100644 index 
0000000000..2070e21bba --- /dev/null +++ b/ansible/roles/ansible-haproxy/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-haproxy diff --git a/ansible/roles/ansible-postgres_patroni/README.md b/ansible/roles/ansible-postgres_patroni/README.md new file mode 100644 index 0000000000..37f2988184 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/README.md @@ -0,0 +1,100 @@ +Role Name +========= +``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +Postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres 
# Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql 
roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-postgres_patroni/defaults/main.yml b/ansible/roles/ansible-postgres_patroni/defaults/main.yml new file mode 100644 index 0000000000..5257a8524d --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# defaults file for ansible-postgres_patroni +#patroni .yaml config +postgres_cluster_name: postgresql-prod + +# users admin password +postgres_patroni_admin_password: admin + +#Authentication +# Replication +postgres_patroni_replication_username: replicator +postgres_patroni_replication_password: password + +#SuperUser +postgres_patroni_superuser_username: postgres +postgres_patroni_superuser_password: password \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/handlers/main.yml b/ansible/roles/ansible-postgres_patroni/handlers/main.yml new file mode 100644 index 0000000000..91f2ff6304 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/handlers/main.yml @@ -0,0 +1,18 @@ +--- +# handlers file for ansible-postgres_patroni +- name: Restart patroni systemd + systemd: + name: patroni.service + state: restarted + daemon_reload: yes + +- name: Restart patroni service + systemd: + name: patroni.service + state: restarted + +- name: Start the postgresql service + systemd: + name: postgresql.service + state: started + enabled: yes \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/meta/main.yml b/ansible/roles/ansible-postgres_patroni/meta/main.yml new file mode 100644 index 0000000000..0538e5f1cd --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible role for setting up postgresql cluster + #company: your company (optional) + + # If the 
issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
diff --git a/ansible/roles/ansible-postgres_patroni/tasks/main.yml b/ansible/roles/ansible-postgres_patroni/tasks/main.yml new file mode 100644 index 0000000000..75f16a30d5 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/tasks/main.yml @@ -0,0 +1,105 @@ +--- +# tasks file for ansible-postgres_patroni + +- name: Install postgresql after updating apt + apt: + name: + - postgresql + - postgresql-contrib + state: present + update_cache: yes + tags: + - postgresql Install + +- name: Stop the postgresql service + systemd: + name: postgresql.service + state: stopped + enabled: yes + tags: + - postgresql_service + +- name: creating softlink for postgres + ansible.builtin.shell: + cmd: ln -s /usr/lib/postgresql/15/bin/* /usr/sbin/ + tags: + - softlink + +- name: Install and update python and pip + apt: + name: + - python3-pip + - python3-dev + - libpq-dev + state: present + tags: + - pip_python + +- name: Upgrade pip to latest vesion + pip: + name: pip + extra_args: --upgrade + state: latest + tags: + - upgrade_pip + +- name: Install patroni and dependencies + pip: + name: + - patroni + - python-etcd + - psycopg2 + state: present + tags: + - install patroni + +- name: Creates data directory for patroni + file: + path: /data + state: directory + mode: 0700 + owner: postgres + group: postgres + tags: + - create_data_dir + +- name: Creates data directory for patroni + file: + path: /data/patroni + state: directory + mode: 0700 + owner: postgres + group: postgres + tags: + - create_patroni_dir + + +- name: Template patroni systemd service file to /etc/systemd/system/patroni.service + template: + src: patroni.service.j2 + dest: /etc/systemd/system/patroni.service + tags: + - patroni_service + +- name: Restart patroni systemd + systemd: + name: patroni.service + state: restarted + daemon_reload: yes + +- name: Template configuration file to patroni.yaml + template: + src: patroni.yaml.j2 + dest: '/etc/patroni.yaml' + tags: + - patroni_config + +- name: Restart patroni 
service + systemd: + name: patroni.service + state: restarted + +- name: Restart postgres service + systemd: + name: postgresql.service + state: restarted diff --git a/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 b/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 new file mode 100644 index 0000000000..d2bbe844e0 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=Runners to orchestrate a high-availability PostgreSQL +After=syslog.target network.target + +[Service] +Type=simple + +User=postgres +Group=postgres + +ExecStart=/usr/local/bin/patroni /etc/patroni.yaml +KillMode=process +TimeoutSec=30 +Restart=no + +[Install] +WantedBy=multi-user.targ \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 b/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 new file mode 100644 index 0000000000..92d5635e46 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 @@ -0,0 +1,58 @@ +scope: postgres +namespace: /db/ +name: {{ postgres_cluster_name }} + +restapi: + listen: {{ postgresql_origin }}:8008 + connect_address: {{ postgresql_origin }}:8008 + +etcd: + host: {{ etcd_ip }}:2379 + +bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + postgresql: + use_pg_rewind: true + + initdb: + - encoding: UTF8 + - data-checksums + + pg_hba: + - host replication replicator 127.0.0.1/32 md5 + - host replication replicator {{ postgresql_1 }}/0 md5 + - host replication replicator {{ postgresql_2 }}/0 md5 + - host replication replicator {{ postgresql_3 }}/0 md5 + - host all all 0.0.0.0/0 md5 + + users: + admin: + password: {{ postgres_patroni_admin_password }} + options: + - createrole + - createdb + +postgresql: + listen: {{ postgresql_origin }}:5432 + connect_address: {{ postgresql_origin }}:5432 + data_dir: /data/patroni + pgpass: 
/tmp/pgpass + authentication: + replication: + username: {{ postgres_patroni_replication_username }} + password: {{ postgres_patroni_replication_password }} + superuser: + username: {{ postgres_patroni_superuser_username }} + password: {{ postgres_patroni_superuser_password }} + parameters: + unix_socket_directories: '.' + +tags: + nofailover: false + noloadbalance: false + clonefrom: false + nosync: false \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/vars/main.yml b/ansible/roles/ansible-postgres_patroni/vars/main.yml new file mode 100644 index 0000000000..bf8074823c --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-postgres_patroni From ba01495ad7aafcf3556ad053ba02563bd2e29975 Mon Sep 17 00:00:00 2001 From: Rekha Date: Mon, 26 Dec 2022 16:34:25 +0530 Subject: [PATCH 155/203] Prashnavali reminder query added --- .../roles/postgres-migration/files/sunbird_programs/V5.1.0.sql | 1 + 1 file changed, 1 insertion(+) create mode 100644 ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql diff --git a/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql b/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql new file mode 100644 index 0000000000..1780b3118b --- /dev/null +++ b/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql @@ -0,0 +1 @@ +INSERT INTO "public"."configuration" ("key", "value", "status") VALUES ('PrashnavaliReminder', ' VidyaDaan: Reminder to kindly create or review pending questions for the Project:$projectName by $projectDate. Log in via https://vdn.diksha.gov.in/contribute. 
Please ignore if work has already been completed.', 'active'); From 8a73f684521c48c35c0a3253131c3b055ab8330e Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 27 Dec 2022 12:04:46 +0530 Subject: [PATCH 156/203] added new jenkins adhoc job for ml-analytics #ED-569 (#3675) --- ansible/ml-analytics-adhoc.yaml | 4 + .../ml-analytics-adhoc-jobs/tasks/main.yaml | 106 ++++++++++++++ .../roles/ml-analytics-service/tasks/main.yml | 2 +- .../jobs/ml-analytics-adhoc/config.xml | 130 ++++++++++++++++++ .../deploy/ml-analytics-adhoc/Jenkinsfile | 47 +++++++ 5 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 ansible/ml-analytics-adhoc.yaml create mode 100644 ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml create mode 100644 pipelines/deploy/ml-analytics-adhoc/Jenkinsfile diff --git a/ansible/ml-analytics-adhoc.yaml b/ansible/ml-analytics-adhoc.yaml new file mode 100644 index 0000000000..f870c5434e --- /dev/null +++ b/ansible/ml-analytics-adhoc.yaml @@ -0,0 +1,4 @@ +- hosts: ml-analytics-service + become: yes + roles: + - ml-analytics-adhoc-jobs diff --git a/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml b/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml new file mode 100644 index 0000000000..abfb50e3bb --- /dev/null +++ b/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml @@ -0,0 +1,106 @@ +- name: Fetch Config file + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/config.ini" + tags: + - fetch-config + +- name: Execute run.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run.sh > /opt/sparkjobs/ml-analytics-service/run_job.log" + tags: + - run-job + +- name: Fetch run_job.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - 
"/opt/sparkjobs/ml-analytics-service/run_job.log" + tags: + - run-job + +- name: Execute run_weekly.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run_weekly.sh > /opt/sparkjobs/ml-analytics-service/run_weekly_job.log" + tags: + - run-weekly + +- name: Fetch run_weekly.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/run_weekly_job.log" + tags: + - run-weekly + +- name: Execute run_program.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run_program.sh > /opt/sparkjobs/ml-analytics-service/run_program_job.log" + tags: + - run-program + +- name: Fetch run_program_job.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/run_program_job.log" + tags: + - run-program + +- name: Execute Observation ingest/refresh + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/observations/pyspark_observation_status_batch.py" + register: out + tags: + - observation-refresh-ingest + +- debug: + var: out.stdout_lines + tags: + - observation-refresh-ingest + +- name: Execute Survey ingest/refresh + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/survey/pyspark_survey_status.py" + register: out + args: + executable: /bin/bash + tags: + - survey-refresh-ingest + +- debug: + var: out.stdout_lines + tags: + - survey-refresh-ingest + +- name: Execute Project Refresh + become: yes + become_user: 
data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/projects/pyspark_project_deletion_batch.py" + register: out + args: + executable: /bin/bash + tags: + - project-refresh + +- debug: + var: out.stdout_lines + tags: + - project-refresh + +- name: Execute Project ingest + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/projects/pyspark_project_batch.py" + register: out + args: + executable: /bin/bash + tags: + - project-ingest + +- debug: + var: out.stdout_lines + tags: + - project-ingest diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index ee609b8806..30b61a06cd 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -145,4 +145,4 @@ minute: "30" hour: "7" weekday: "4" - job: "{{ BASEPATH }}/run_weekly.sh > {{ BASEPATH }}/ml-analytics-service/nvsk_data_weekly.logs" + job: "{{ BASEPATH }}/ml-analytics-service/run_weekly.sh > {{ BASEPATH }}/ml-analytics-service/nvsk_data_weekly.logs" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml new file mode 100644 index 0000000000..eeb6fdfe2b --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml @@ -0,0 +1,130 @@ + + + + false + + + false + false + + + + + private_branch + choice-parameter-3803648169564146 + 1 + + true + + + + true + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + 
ET_FORMATTED_HTML + true + + + branch_or_tag + choice-parameter-3803648170694062 + 1 + + true + + + + true + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + ET_FORMATTED_HTML + true + + + action + <font color=green size=2><b>Choose the job names to run. Multi-selection is available.</b></font> + choice-parameter-3812862131559945 + 1 + + true + + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + PT_MULTI_SELECT + false + 1 + + + + + 0 + 0 + + false + project + false + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + github-cred + + + + + ${branch_or_tag} + + + false + + + + pipelines/deploy/ml-analytics-adhoc/Jenkinsfile + false + + + false + diff --git a/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile b/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile new file mode 100644 index 0000000000..8a2f010d6b --- /dev/null +++ b/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile @@ -0,0 +1,47 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + cleanWs() + checkout scm + } + ansiColor('xterm') { + + stage('Deploy') { + values = [:] + sh 'echo "${currentWs} is this"' + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + ansiblePlaybook = "${currentWs}/ansible/ml-analytics-adhoc.yaml" + ansibleExtraArgs = "--tags ${params.action} --vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('ansiblePlaybook', 
ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = "SUCCESS" + currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + summary() + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From b2c76008bb427b2260f8f9ee64d4d46f8ac69332 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 13:59:06 +0530 Subject: [PATCH 157/203] fix: https:// already part of another url Signed-off-by: Keshav Prasad --- .../helm_charts/core/nginx-public-ingress/values.j2 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index c1331e76a5..1b9a670a37 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -557,7 +557,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/v3/preview/$url_full; + proxy_pass $s3_bucket/v3/preview/$url_full; } location ~ /content-editor/telemetry|collection-editor/telemetry { rewrite ^/(.*) /$1 break; @@ -604,7 +604,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/content-editor/$url_full; + proxy_pass $s3_bucket/content-editor/$url_full; } location ~* ^/discussion-ui/(.*) { # Enabling compression @@ -628,7 +628,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass 
https://$s3_bucket/discussion-ui/$url_full; + proxy_pass $s3_bucket/discussion-ui/$url_full; } location ~* ^/collection-editor/(.*) { # Enabling compression @@ -661,7 +661,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/collection-editor/$url_full; + proxy_pass $s3_bucket/collection-editor/$url_full; } location ~* ^/generic-editor/(.*) { # Enabling compression @@ -694,7 +694,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/generic-editor/$url_full; + proxy_pass $s3_bucket/generic-editor/$url_full; } location ~* ^/content-plugins/(.*) { # Enabling cache for Response code 200 @@ -731,7 +731,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/content-plugins/$url_full; + proxy_pass $s3_bucket/content-plugins/$url_full; } location /thirdparty { # Enabling cache for Response code 200 From 3da4be417edb55763bc5a33d470fdb0e6a0ac54d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:43:55 +0530 Subject: [PATCH 158/203] fix: updated split logic based on url value Signed-off-by: Keshav Prasad --- .../stack-proxy/templates/proxy-default.conf | 16 ++++++------- .../core/nginx-public-ingress/values.j2 | 24 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ansible/roles/stack-proxy/templates/proxy-default.conf b/ansible/roles/stack-proxy/templates/proxy-default.conf index f98ba2aae9..9ff34d8dcb 100644 --- a/ansible/roles/stack-proxy/templates/proxy-default.conf +++ b/ansible/roles/stack-proxy/templates/proxy-default.conf @@ -306,7 +306,7 @@ server { set $bucket "{{upstream_url}}"; set $url_full '$1'; 
proxy_http_version 1.1; - proxy_set_header Host "{{upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -339,10 +339,10 @@ server { return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -354,7 +354,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$s3_bucket/v3/preview/$url_full; + proxy_pass https://$bucket/v3/preview/$url_full; } location ~* ^/content-plugins/(.*) { @@ -378,11 +378,11 @@ location ~* ^/content-plugins/(.*) { add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -394,7 +394,7 @@ location ~* ^/content-plugins/(.*) { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$s3_bucket/content-plugins/$url_full; + proxy_pass https://$bucket/content-plugins/$url_full; } location /thirdparty { @@ -448,7 +448,7 @@ location ~* ^/desktop/(.*) { set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host 
"{{sunbird_offline_azure_storage_account_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index 1b9a670a37..678a12bb4d 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -541,7 +541,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -557,7 +557,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/v3/preview/$url_full; + proxy_pass $bucket/v3/preview/$url_full; } location ~ /content-editor/telemetry|collection-editor/telemetry { rewrite ^/(.*) /$1 break; @@ -588,7 +588,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -604,7 +604,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/content-editor/$url_full; + proxy_pass $bucket/content-editor/$url_full; } location ~* ^/discussion-ui/(.*) { # Enabling compression @@ -612,7 +612,7 @@ proxyconfig: |- gzip_min_length 100000; gzip_proxied expired no-cache no-store private auth; gzip_types application/javascript 
application/x-javascript text/css text/javascript; - set $s3_bucket "{{discussion_upstream_url}}"; + set $bucket "{{discussion_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{discussion_upstream_url.split('/')[0]|lower}}"; @@ -628,7 +628,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/discussion-ui/$url_full; + proxy_pass $bucket/discussion-ui/$url_full; } location ~* ^/collection-editor/(.*) { # Enabling compression @@ -645,7 +645,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -661,7 +661,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/collection-editor/$url_full; + proxy_pass $bucket/collection-editor/$url_full; } location ~* ^/generic-editor/(.*) { # Enabling compression @@ -678,7 +678,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -694,7 +694,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/generic-editor/$url_full; + proxy_pass $bucket/generic-editor/$url_full; } location ~* ^/content-plugins/(.*) { # Enabling cache for Response code 200 @@ -715,7 +715,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket 
"{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -731,7 +731,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/content-plugins/$url_full; + proxy_pass $bucket/content-plugins/$url_full; } location /thirdparty { # Enabling cache for Response code 200 From d3e9c94d9404b1b1f22000d608ce618255d435bd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:46:37 +0530 Subject: [PATCH 159/203] fix: removed https:// url string Signed-off-by: Keshav Prasad --- ansible/roles/stack-proxy/templates/proxy-default.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/stack-proxy/templates/proxy-default.conf b/ansible/roles/stack-proxy/templates/proxy-default.conf index 9ff34d8dcb..9f78549da2 100644 --- a/ansible/roles/stack-proxy/templates/proxy-default.conf +++ b/ansible/roles/stack-proxy/templates/proxy-default.conf @@ -318,7 +318,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/$url_full; + proxy_pass $bucket/$url_full; } @@ -354,7 +354,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/v3/preview/$url_full; + proxy_pass $bucket/v3/preview/$url_full; } location ~* ^/content-plugins/(.*) { @@ -394,7 +394,7 @@ location ~* ^/content-plugins/(.*) { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/content-plugins/$url_full; + proxy_pass $bucket/content-plugins/$url_full; } location /thirdparty { @@ -460,7 +460,7 @@ location ~* ^/desktop/(.*) { proxy_intercept_errors on; add_header 
Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$offline_bucket/$url_full; + proxy_pass $offline_bucket/$url_full; } location / { From 854398a2f2af283fbf1e5f4319e72ec856b5ec69 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:52:42 +0530 Subject: [PATCH 160/203] fix: update url array index Signed-off-by: Keshav Prasad --- .../core/nginx-public-ingress/values.j2 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index 678a12bb4d..cbc2e40f4e 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -511,7 +511,7 @@ proxyconfig: |- set $bucket "{{upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -544,7 +544,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -591,7 +591,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -615,7 +615,7 @@ proxyconfig: |- set 
$bucket "{{discussion_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{discussion_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{discussion_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -648,7 +648,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -681,7 +681,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -718,7 +718,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -778,7 +778,7 @@ proxyconfig: |- set $offline_bucket "{{ sunbird_offline_azure_storage_account_url }}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header 
Access-Control-Allow-Methods; From 045d158e1199dfed0908325ee0a7fb6be4a332a8 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 17:37:04 +0530 Subject: [PATCH 161/203] fix: remove hardcoded https from proxy_pass value (#3688) --- kubernetes/helm_charts/core/nginx-public-ingress/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index cbc2e40f4e..e0699cdb50 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -524,7 +524,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$bucket/$url_full; + proxy_pass $bucket/$url_full; } location ~* ^/content/preview/(.*) { # Enabling compression From b87d0d3c3843891baf465ba6afa48c11c50e5a72 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Dec 2022 13:41:06 +0530 Subject: [PATCH 162/203] fix: remove https as its included in the var Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 9cb6473418..30d4e1830f 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -195,7 +195,7 @@ sunbird_telemetry_service_local_url={{sunbird_telemetry_service_local_url | defa #Release-4.4.0 sunbird_portal_video_max_size={{sunbird_portal_video_max_size | default(150)}} sunbird_default_file_size={{sunbird_default_file_size | default(150)}} -sunbird_portal_uci_blob_url={{ sunbird_portal_uci_blob_url | default('https://' + cloud_storage_url + '/uci') }} 
+sunbird_portal_uci_blob_url={{ sunbird_portal_uci_blob_url | default(cloud_storage_url + '/uci') }} portal_redirect_error_callback_domain={{portal_redirect_error_callback_domain | default("https://"+domain_name)}} sunbird_portal_uci_bot_phone_number={{sunbird_portal_uci_bot_phone_number | default(+912249757677)}} From 15394c46e23de68a5aa74b818e82b1e4ff9eccc6 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Wed, 28 Dec 2022 21:59:39 +0530 Subject: [PATCH 163/203] Project certificate download uris edited (#3691) typo correction --- ansible/roles/kong-api/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index d7589ae22e..7af41e2149 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9795,7 +9795,7 @@ kong_apis: config.enabled: true - name: getProjectRCCertificate - uris: "{{ registry_service_prefix }}/projetCertificate/v1/download" + uris: "{{ registry_service_prefix }}/projectCertificate/v1/download" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" strip_uri: true plugins: @@ -10000,4 +10000,4 @@ kong_apis: config.allowed_payload_size: "{{ small_request_size_limit }}" - name: opa-checks config.required: true - config.enabled: true \ No newline at end of file + config.enabled: true From e9978e75870c46a5eb7f33071eb1d9d1d657f949 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Thu, 29 Dec 2022 11:28:00 +0530 Subject: [PATCH 164/203] Release 5.1.0 - CSP changes (#3692) * Update management bucketname for ES * Update ES snapshot roles --- ansible/roles/es-azure-snapshot/defaults/main.yml | 12 +++--------- ansible/roles/es-gcs-snapshot/defaults/main.yml | 9 ++++++--- ansible/roles/es-s3-snapshot/defaults/main.yml | 9 ++++++--- ansible/roles/es6/tasks/plugins/repository-gcs.yml | 4 
++-- ansible/roles/es6/tasks/plugins/repository-s3.yml | 4 ++-- .../roles/log-es6/tasks/plugins/repository-gcs.yml | 4 ++-- .../roles/log-es6/tasks/plugins/repository-s3.yml | 4 ++-- 7 files changed, 23 insertions(+), 23 deletions(-) diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 396746aa32..df52870977 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -1,7 +1,7 @@ snapshot_create_request_body: { type: azure, settings: { - container: "{{ es_backup_storage }}", + container: "{{ cloud_storage_esbackup_foldername }}", base_path: "{{ snapshot_base_path }}_{{ base_path_date }}" } } @@ -10,11 +10,5 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_container_name: "elasticsearch-snapshots" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -es_backup_storage: "{{ es_azure_backup_container_name }}" \ No newline at end of file +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es-gcs-snapshot/defaults/main.yml b/ansible/roles/es-gcs-snapshot/defaults/main.yml index 5e3cbece6f..23fa7c5ef1 100644 --- a/ansible/roles/es-gcs-snapshot/defaults/main.yml +++ b/ansible/roles/es-gcs-snapshot/defaults/main.yml @@ -1,12 +1,15 @@ snapshot_create_request_body: { type: gcs, settings: { - bucket: "{{ gcs_management_bucket_name }}", - base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" + bucket: "{{ cloud_storage_management_bucketname }}", + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" + +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es-s3-snapshot/defaults/main.yml b/ansible/roles/es-s3-snapshot/defaults/main.yml index 7ddda6ebd0..3a55471ccf 100644 --- a/ansible/roles/es-s3-snapshot/defaults/main.yml +++ b/ansible/roles/es-s3-snapshot/defaults/main.yml @@ -1,12 +1,15 @@ snapshot_create_request_body: { type: s3, settings: { - bucket: "{{ aws_management_bucket_name }}", - base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" + bucket: "{{ cloud_storage_esbackup_bucketname }}", + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" 
-es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" + +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es6/tasks/plugins/repository-gcs.yml b/ansible/roles/es6/tasks/plugins/repository-gcs.yml index 7d5c32e52e..6a32c0051a 100644 --- a/ansible/roles/es6/tasks/plugins/repository-gcs.yml +++ b/ansible/roles/es6/tasks/plugins/repository-gcs.yml @@ -3,7 +3,7 @@ become: yes copy: dest: "{{ conf_dir }}/gcs_management_bucket_service_account.json" - content: "{{ gcs_management_bucket_service_account }}" + content: "{{ cloud_management_storage_secret }}" - name: Add gcs service account file to keystore become: yes @@ -15,4 +15,4 @@ - name: Remove the service account file file: path: "{{ conf_dir }}/gcs_management_bucket_service_account.json" - state: absent \ No newline at end of file + state: absent diff --git a/ansible/roles/es6/tasks/plugins/repository-s3.yml b/ansible/roles/es6/tasks/plugins/repository-s3.yml index b5897792ab..07655d6746 100644 --- a/ansible/roles/es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/es6/tasks/plugins/repository-s3.yml @@ -1,14 +1,14 @@ --- - name: Add default aws account name for backups become: yes - shell: echo "{{ aws_management_bucket_user_access_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" - name: Add default aws account key for backups become: yes - shell: echo "{{ aws_management_bucket_user_secret_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f 
s3.client.default.secret_key no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" diff --git a/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml b/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml index 81078e173d..7d1c1fbd4a 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml @@ -3,7 +3,7 @@ become: yes copy: dest: "{{ es_conf_dir }}/gcs_management_bucket_service_account.json" - content: "{{ gcs_management_bucket_service_account }}" + content: "{{ cloud_management_storage_secret }}" - name: Add gcs service account file to keystore become: yes @@ -15,4 +15,4 @@ - name: Remove the service account file file: path: "{{ es_conf_dir }}/gcs_management_bucket_service_account.json" - state: absent \ No newline at end of file + state: absent diff --git a/ansible/roles/log-es6/tasks/plugins/repository-s3.yml b/ansible/roles/log-es6/tasks/plugins/repository-s3.yml index 344af29e6e..2c05927255 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-s3.yml @@ -1,14 +1,14 @@ --- - name: Add default aws account name for backups become: yes - shell: echo "{{ aws_management_bucket_user_access_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" - name: Add default aws account key for backups become: yes - shell: echo "{{ aws_management_bucket_user_secret_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" From 13dfc4709969435a7e735048f098f8e23b2f98bc Mon Sep 17 00:00:00 2001 
From: PrasadMoka Date: Thu, 29 Dec 2022 11:27:06 +0530 Subject: [PATCH 165/203] ED-621: added placeholder variable --- ansible/inventory/env/group_vars/all.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 8dfdd8a43d..d86c71d018 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -133,6 +133,7 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" +cloud_store_base_path_placeholder: "{{ cloud_store_base_path_placeholder }}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From bf68b0d50a4383d90f4ad0b0c1c526eabda8f2bf Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 29 Dec 2022 11:41:08 +0530 Subject: [PATCH 166/203] removed variable --- ansible/inventory/env/group_vars/all.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index d86c71d018..8dfdd8a43d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -133,7 +133,6 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" -cloud_store_base_path_placeholder: "{{ cloud_store_base_path_placeholder }}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From fd9a8f4be0fa8552ff2036b419b95131c4595d61 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 29 Dec 2022 18:20:21 +0530 Subject: [PATCH 167/203] ED-621: added placeholder variable --- ansible/inventory/env/group_vars/all.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 8dfdd8a43d..153be0f813 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ 
b/ansible/inventory/env/group_vars/all.yml @@ -133,6 +133,7 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" +cloud_store_base_path_placeholder: "$CLOUD_BASE_PATH" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From 891b73c7b3c5ee97ec8e93502d8eda804ce746f0 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Thu, 29 Dec 2022 18:35:42 +0530 Subject: [PATCH 168/203] Issue #KN-439 chore: Added the transcripts in cloudstorage_metadata_list. --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 036fda51bd..65268e7477 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1052,7 +1052,7 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" -cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl", "transcripts"]' ### inQuiry assessment service default values inquiry_schema_path: "{{ kp_schema_base_path }}" From 389013a3ff93736d670486ddc8df1f198fb5c1fa Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Fri, 30 
Dec 2022 09:58:27 +0530 Subject: [PATCH 169/203] Add gcp vars for service accounts (#3696) --- ansible/artifacts-download.yml | 2 ++ ansible/artifacts-upload.yml | 2 ++ ansible/assets-upload.yml | 2 ++ ansible/deploy-plugins.yml | 2 ++ ansible/desktop-faq-upload.yml | 10 ++++++++++ ansible/dial_upload-schema.yml | 2 ++ ansible/kp_upload-schema.yml | 2 ++ ansible/roles/cassandra-backup/tasks/main.yml | 2 ++ ansible/roles/cassandra-restore/tasks/main.yml | 2 ++ ansible/roles/cert-templates/tasks/main.yml | 2 ++ ansible/roles/desktop-deploy/tasks/main.yml | 2 ++ ansible/roles/gcp-cloud-storage/defaults/main.yml | 5 +++++ ansible/roles/grafana-backup/tasks/main.yml | 2 ++ ansible/roles/jenkins-backup-upload/tasks/main.yml | 2 ++ ansible/roles/mongodb-backup/tasks/main.yml | 2 ++ .../postgres-managed-service-backup/tasks/main.yml | 2 ++ .../postgres-managed-service-restore/tasks/main.yml | 2 ++ ansible/roles/postgresql-backup/tasks/main.yml | 2 ++ ansible/roles/postgresql-restore/tasks/main.yml | 2 ++ ansible/roles/prometheus-backup-v2/tasks/main.yml | 2 ++ ansible/roles/prometheus-backup/tasks/main.yml | 2 ++ ansible/roles/prometheus-restore/tasks/main.yml | 2 ++ ansible/roles/redis-backup/tasks/main.yml | 2 ++ ansible/uploadFAQs.yml | 2 ++ 24 files changed, 59 insertions(+) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 46167180e4..2fc2748229 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -21,6 +21,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_artifact_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_artifact_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 3bdbe73017..305492afc2 100644 --- a/ansible/artifacts-upload.yml +++ 
b/ansible/artifacts-upload.yml @@ -22,6 +22,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_artifact_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_artifact_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 09e7df6ceb..b8d5836cc6 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -56,6 +56,8 @@ block: - name: set common gcloud variables set_fact: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" gcp_path: "" file_delete_pattern: "" diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index a78ce1c640..5774a12454 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -141,6 +141,8 @@ name: gcp-cloud-storage tasks_from: "{{ item[0] }}" vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" file_delete_pattern: "content-plugins/{{ item[1] }}/*" gcp_path: "content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 3683202043..a95e8828e7 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -95,6 +95,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" tags: - upload-desktop-faq @@ -105,6 +107,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ 
cloud_private_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_private_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_label_bucketname }}" tags: - upload-label @@ -115,6 +119,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" tags: - upload-chatbot-config @@ -125,6 +131,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" tags: - upload-csv-template @@ -135,6 +143,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" tags: - upload-discussion-ui diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 757a80f6e5..75609bde68 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_dial_bucketname }}" gcp_path: "schemas/local" local_file_or_folder_path: "dial_schema_template_files" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index d12b74433d..0f029d7cbd 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -34,6 +34,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ 
cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" gcp_path: "{{ schemas/local" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 0e5ae87477..54941f8343 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -63,6 +63,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 4bd8c05991..8d74c4c695 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -37,6 +37,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 0caf2b1bfe..893e0776f4 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -62,6 +62,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_private_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_private_storage_secret 
}}" gcp_bucket_name: "{{ cloud_storage_certservice_bucketname }}" gcp_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index ba077b778f..6a01f97b0c 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -108,6 +108,8 @@ block: - name: set common gcloud variables set_fact: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - name: upload batch of files to gcloud storage diff --git a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml index b0fd847b26..a9f4247d42 100644 --- a/ansible/roles/gcp-cloud-storage/defaults/main.yml +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -1,3 +1,8 @@ +# GCP service account name +# Example - +# gcp_storage_service_account_name: test@sunbird.iam.gserviceaccount.com +gcp_storage_service_account_name: "" + # GCP bucket name # Example - # bucket_name: "sunbird-dev-public" diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 90dc3526ca..a41b01c2aa 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" gcp_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml 
b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 89d8f3e29c..612557a61b 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -43,6 +43,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" gcp_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index f51216b14f..fe0aa286bd 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -45,6 +45,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" gcp_path: "{{ cloud_storage_mongodbbackup_foldername }}/{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index ba101e2509..588b8fc5b5 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -72,6 +72,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 58d2c53482..1b499e338f 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -39,6 +39,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 5b3303bf97..fd4da5b8cc 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -44,6 +44,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index e076590f23..e57a321a29 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -34,6 +34,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + 
gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 4a65bb6f8f..0323ed4d84 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -47,6 +47,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 10d8e2fb3b..a665540f8a 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -56,6 +56,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 440b777fe4..60d9bd39bd 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ 
b/ansible/roles/prometheus-restore/tasks/main.yml @@ -32,6 +32,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index f1cf35622f..9863fe5f28 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -44,6 +44,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" gcp_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index cf90e343d1..b38f2ff99a 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" dest_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" From fe6b33643928c377a2a9a9c66ac07ecf66fed547 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 2 Jan 2023 14:23:28 +0530 Subject: [PATCH 170/203] Fix the command to get deployed image (#3698) --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 8f4881089a..86c3386a1e 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -118,7 +118,7 @@ ignore_errors: true - name: Get deployed image name - deployments - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: image - set_fact: From 084eb4feec8d35a7e971e0a3898410a85d0a9f5d Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 2 Jan 2023 17:23:45 +0530 Subject: [PATCH 171/203] Fix gcp deploy issues (#3699) * Fix typo * Fix the command to get deployed image --- ansible/kp_upload-schema.yml | 2 +- kubernetes/ansible/roles/deploy-player/tasks/main.yml | 2 +- kubernetes/ansible/roles/helm-daemonset/tasks/main.yml | 2 +- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 4 ++-- kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 0f029d7cbd..3a28ce5782 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -37,6 +37,6 @@ gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" - gcp_path: "{{ schemas/local" + gcp_path: "schemas/local" local_file_or_folder_path: "{{ source_name }}" when: cloud_service_provider == "gcloud" diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml 
index 52500df2e3..0aa27af993 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -74,7 +74,7 @@ delay: 30 - name: Get deployed image name - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - set_fact: diff --git a/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml b/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml index e04c4f137f..91fcc9f979 100644 --- a/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml @@ -25,7 +25,7 @@ delay: 30 - name: Get deployed image name - shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]'" register: deployed_image - set_fact: diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 86c3386a1e..16b62ee9d6 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -132,7 +132,7 @@ ignore_errors: true - name: Get deployed image name - daemonsets - shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]' register: image - set_fact: @@ -146,7 +146,7 @@ ignore_errors: true - name: Get deployed image name - 
statefulsets - shell: "kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]' register: image - set_fact: diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 09e96cf25e..6d0b7ef387 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -89,7 +89,7 @@ delay: 30 - name: Get deployed image name - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - set_fact: From 16e0cd771206f9a035b349f8696ed2aac1e13bc4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 3 Jan 2023 17:51:36 +0530 Subject: [PATCH 172/203] Added new variables (#3701) --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 ++ ansible/roles/ml-analytics-service/templates/config.j2 | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index d73099451d..15f9b438c4 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -114,3 +114,5 @@ cloud_public_storage_region: "{{ cloud_public_storage_region }}" cloud_public_storage_endpoint: "{{ cloud_public_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" 
+ml_analytics_nvsk_imp_projects_data_local_path: "{{ config_path }}/urgent_data_metrics/output/" +ml_analytics_nvsk_imp_projects_data_blob_path: "Manage_Learn_Data/micro_improvement/" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 52927ec957..27da8be26b 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -279,3 +279,7 @@ observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_c survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_cloud_blob_path}} cname_url = {{ ml_analytics_cname_url }} + +nvsk_imp_projects_data_local_path = {{ ml_analytics_nvsk_imp_projects_data_local_path }} + +nvsk_imp_projects_data_blob_path = {{ ml_analytics_nvsk_imp_projects_data_blob_path }} From ea44249610b7107c30e9d5f5f8fa635240a66bd9 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 5 Jan 2023 14:52:30 +0530 Subject: [PATCH 173/203] ED-621:updated configurations for release-5.1.0 --- ansible/inventory/env/group_vars/all.yml | 3 +-- .../stack-sunbird/templates/sunbird_cert-service.env | 12 ++++++------ .../stack-sunbird/templates/sunbird_lms-service.env | 7 +++++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 153be0f813..6cbd63ec03 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -131,9 +131,8 @@ cassandra_restore_dir: "/home/{{ ansible_ssh_user }}/" cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### cassandra_multi_dc_enabled: false -# Release-5.0.1 +### Release-5.0.1 ### cloud_storage_base_url: "{{cloud_storage_base_url}}" -cloud_store_base_path_placeholder: "$CLOUD_BASE_PATH" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" diff --git 
a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env index c43c23171b..19a9a6c46c 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env @@ -1,13 +1,13 @@ -CONTAINER_NAME={{cert_service_container_name}} +CONTAINER_NAME={{cloud_storage_certservice_bucketname}} CLOUD_STORAGE_TYPE={{cloud_service_provider}} -PRIVATE_CLOUD_STORAGE_SECRET={{sunbird_private_storage_account_key}} -PRIVATE_CLOUD_STORAGE_KEY={{sunbird_private_storage_account_name}} +PRIVATE_CLOUD_STORAGE_SECRET={{cloud_private_storage_secret}} +PRIVATE_CLOUD_STORAGE_KEY={{cloud_private_storage_accountname}} sunbird_cert_domain_url={{proto}}://{{proxy_server_name}} sunbird_cert_enc_service_url=http://enc-service:8013 download_link_expiry_timeout=600 es_conn_info={{groups['es']|join(':9200,')}}:9200 ITEXT_LICENSE_ENABLED={{itext_license_enabled}} ITEXT_LICENSE_PATH=/home/sunbird/itext_trail_license.xml -PUBLIC_CLOUD_STORAGE_KEY={{sunbird_public_storage_account_name}} -PUBLIC_CLOUD_STORAGE_SECRET={{sunbird_public_storage_account_key}} -PUBLIC_CONTAINER_NAME={{sunbird_cert_qr_container_name}} +PUBLIC_CLOUD_STORAGE_KEY={{cloud_public_storage_accountname}} +PUBLIC_CLOUD_STORAGE_SECRET={{cloud_public_storage_secret}} +PUBLIC_CONTAINER_NAME={{cloud_storage_certqr_bucketname}} diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 1b3fdba3ca..946bf3af10 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -80,7 +80,7 @@ sunbird_course_batch_notification_enabled={{sunbird_course_batch_notification_en sunbird_course_batch_notification_signature={{sunbird_course_batch_notification_signature}} sunbird_otp_expiration={{sunbird_otp_expiration}} 
sunbird_otp_length={{sunbird_otp_length}} -sunbird_content_azure_storage_container={{sunbird_content_azure_storage_container}} +sunbird_content_cloud_storage_container={{cloud_storage_content_bucketname}} # Release-1.14 sunbird_time_zone={{sunbird_time_zone}} # Release-1.15 @@ -143,4 +143,7 @@ enrollment_list_size={{ enrollment_list_size | default(1000) }} # Release-5.0.0 sunbird_cloud_service_provider={{cloud_service_provider}} -isMultiDCEnabled={{cassandra_multi_dc_enabled}} \ No newline at end of file +isMultiDCEnabled={{cassandra_multi_dc_enabled}} + +# Release-5.0.1 +cloud_storage_base_url={{cloud_storage_base_url}} \ No newline at end of file From 1b8fe739a0ec590730f16ae2578f05064464659b Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Thu, 5 Jan 2023 17:05:03 +0530 Subject: [PATCH 174/203] Updated private_repo template with CSP changes (#3689) --- .../ansible/inventory/dev/Core/common.yml | 166 +++++++++++++----- .../ansible/inventory/dev/Core/secrets.yml | 72 ++++---- .../inventory/dev/DataPipeline/common.yml | 141 +++++++++++++-- .../inventory/dev/DataPipeline/secrets.yml | 46 +++-- .../dev/KnowledgePlatform/common.yml | 159 ++++++++++++++--- .../dev/KnowledgePlatform/secrets.yml | 45 ++++- .../ansible/inventory/dev/UCI/common.yml | 1 + private_repo/ansible/inventory/dev/UCI/hosts | 1 + .../ansible/inventory/dev/UCI/secrets.yml | 1 + .../inventory/dev/managed-learn/common.yml | 1 + .../ansible/inventory/dev/managed-learn/hosts | 1 + .../inventory/dev/managed-learn/secrets.yml | 1 + 12 files changed, 499 insertions(+), 136 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/UCI/common.yml create mode 120000 private_repo/ansible/inventory/dev/UCI/hosts create mode 120000 private_repo/ansible/inventory/dev/UCI/secrets.yml create mode 120000 private_repo/ansible/inventory/dev/managed-learn/common.yml create mode 120000 private_repo/ansible/inventory/dev/managed-learn/hosts create mode 120000 
private_repo/ansible/inventory/dev/managed-learn/secrets.yml diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 1984bcd2b3..bee6dc7028 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -20,41 +20,97 @@ sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should # List of mail ids to whome the monitoring alerts should be sent. alerts_mailing_list : "devops@myorg.com" # Comma separated mail list for Alerts; eg: user1@mail.com, user2@mail.com - -# Define the below if you are using Azure Cloud -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) -sunbird_artifact_storage_account_name: "{{ sunbird_management_storage_account_name }}" # Azure account name for storing artifacts data (like jenkins build zip files) - -azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -azure_private_storage_account_name: "{{ sunbird_private_storage_account_name }}" -azure_management_storage_account_name: "{{ sunbird_management_storage_account_name }}" -azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" - -# Define the below if you are using AWS Cloud -aws_region: "" -aws_management_s3_bucket_name: "" -aws_artifact_s3_bucket_name: "" -aws_public_s3_bucket_name: "" -aws_private_s3_bucket_name: "" - -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" 
-gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" - -gcloud_private_bucket_projectId: "" +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" + + +# Create object storage for each below mentioned variables and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" +cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: 
cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" 
+cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: "uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" +cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" # Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) # GCP -# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} +# cloud_storage_url: https://storage.googleapis.com # AWS -# cloud_storage_url: "https://{{aws_public_s3_bucket_name}}.s3.{{aws_region}}.amazonaws.com" +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" # Azure -cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -81,10 +137,6 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml -# SB-31155 - This should be deprecated in future in favour of content_storage defined in all.yml -sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml - - # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. 
For example, # # From: SBSMS @@ -131,19 +183,12 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" -# Azure account related vars -sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" -azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" -plugin_container_name: "{{sunbird_content_azure_storage_container}}" - keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" # Other vars cert_service_container_name: e-credentials # Conatiner name for cert service to store cert templates cert_service_cloud_storage_type: azure # Changes this if you use other clouds like aws, gcp -artifacts_container: artifacts # Azure blob container name to save built artifacts, default it can be arti dataexhaust_super_admin_channel: sunbird dedup_redis_host: "{{ groups['dp-redis'][0] }}" # for router service namespace: "{{ env }}" # required for bot and router, these helm charts should be moved to devops repo @@ -157,6 +202,45 @@ monitor_alerts_mail_from_email: "{{ sunbird_mail_server_from_email }}" ekstep_s3_env: "" # This variable is not used and leave the value as empty freshDesk_token: "" +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required +cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_management_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" +cloud_management_storage_project: "{{ 
cloud_public_storage_project }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" + +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" + + + # Provide the admin-api consumer access to all API's - The public repo restricts this for security reasons # If you dont want to key to have access to all API's, please remove the variables kong_all_consumer_groups and kong_consumers or edit the groups to have a smaller subset kong_all_consumer_groups: diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index e8e48bf801..9b8f0f43a5 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,42 +8,38 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" core_vault_docker_registry_password: "change.docker.password" -# Run the below command in shell -# date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' -# sas_token=?`az 
storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` -# generate a sas for the blob for entire storage accout with write and read access -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command -sunbird_public_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command -sunbird_management_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command - -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" - -# Define the below if you are using Azure Cloud -azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" -azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" -azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" -azure_artifact_storage_account_key: "{{ sunbird_artifact_storage_account_key }}" -azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" -azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" - -# Define the below if you are using AWS Cloud -aws_management_bucket_access_key: "" -aws_artifact_bucket_access_key: "" -aws_public_bucket_access_key: "" -aws_private_bucket_access_key: "" - -aws_management_bucket_secret_access_key: "" -aws_artifact_bucket_secret_access_key: "" -aws_public_bucket_secret_access_key: "" -aws_private_bucket_secret_access_key: "" - -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: 
"" -gcp_storage_key_file: "" # gcloud service account key - refer: https://cloud.google.com/iam/docs/creating-managing-service-account-keys - +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" # The proxy key and crt values should be 
padded to the right by a couple of spaces # Example: @@ -184,3 +180,7 @@ ml_analytics_druid_observation_status_injestion_spec: DruidObeservationStatusIng ml_analytics_api_access_token: ApiAccessToken # ML authorization key ml_analytics_api_authorization_key: ApiAuthorizationKey + +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/DataPipeline/common.yml b/private_repo/ansible/inventory/dev/DataPipeline/common.yml index ef8432539b..715e9cc13a 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/common.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/common.yml @@ -5,15 +5,98 @@ domain_name: "" # your domain name like example.com # docker hub details dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_druid_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing druid data (like query results) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: 
cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" +# Create object storage for each below mentioned variables and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" +cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage 
contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" +cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: "uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" 
+cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.googleapis.com +# AWS +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" +# Azure +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features @@ -48,12 +131,47 @@ postgres: db_admin_password: "{{dp_vault_pgdb_admin_password}}" druid_postgres_user: druid # Do not change this -sunbird_private_azure_report_container_name: 'reports' -sunbird_public_azure_report_container_name: 'public-reports' imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins core_kubeconfig_path: "{{ kubeconfig_path }}" # kubeconfig file path on jenkins for core kube cluster, change this if you use separate kube cluster for core and KP + DP +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required +cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_management_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" 
+cloud_management_storage_project: "{{ cloud_public_storage_project }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" + +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" + # The below sets the kafka topics retention time to 1 day, if you use the defaults from the public repo, it will be 7 days # If you want to retain the topics for 7 days, remove the below sections completely # Ensure you have atleast 1 TB of disk to retain data for 7 days @@ -170,10 +288,3 @@ processing_kafka_overriden_topics: retention_time: 86400000 replication_factor: 1 -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" -gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" - -gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml index c37b74d8fe..2b711a27a2 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml +++ 
b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml @@ -4,13 +4,38 @@ dp_vault_pgdb_password: "change.postgres.password" # postgres p dp_vault_pgdb_admin_password: "change.postgres.password" # postgres password for admin dp_vault_druid_postgress_pass: "change.postgres.password" # postgres password for druid db -# Azure storage account credentials - Note if you are using a single account, you can set the same key for the belows -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "change.azure.storage.account.key" -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_druid_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # this must be the sas token from Core directory that your generated +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider is azure then update with storage 
account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" core_vault_docker_registry_url: "change.docker.url" # for docker hub use https://index.docker.io/v1 core_vault_docker_registry_user: "change.docker.user" @@ -25,8 +50,7 @@ dp_vault_data_exhaust_token: # slack api token # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so -dp_vault_artifacts_container: artifacts -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: "" -gcp_storage_key_file: "" +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index b905d7b359..bede16cb5b 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -5,19 +5,107 @@ cloud_service_provider: "" # Your cloud service provider name. 
Supported v dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip domain_name: "" # your domain name like example.com -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) + +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" +# If cloud_service_provider is oci update this variable with namespace +# Example: cloud_public_storage_namespace: "apsjfhudfjs" +cloud_public_storage_namespace: "" + + +# Create object storage for each below mentioned variables 
and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" +cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public 
schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" +cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: "uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" +cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.googleapis.com +# AWS +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" +# Azure +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # Optional env: dev # some name like dev, preprod etc proto: https # http or https, preferably https -azure_public_container: contents # Azure container name for storing public data (like contents) - environment_id: "10000003" # A 8 digit number for example 
like 1000000, # Important: same as the one in core/common.yaml neo4j_zip: neo4j-community-3.3.9-unix.tar.gz # Neo4j file name present in the azure blob artifacts folder (only neo4j 3.4 and below is supported) @@ -28,27 +116,50 @@ neo4j_enterprise: false # Set this to true if you use # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so ekstep_domain_name: "{{ proto }}://{{ domain_name }}" -artifacts_container: artifacts - -# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) -# GCP -# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} -# AWS -# cloud_storage_url: # Geetha to fill this url based on AWS role vars -# Azure -cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # SB-31155 - This should be deprecated in future in favour of plugin_storage -plugin_container_name: "{{ azure_public_container }}" +plugin_container_name: "{{ cloud_storage_content_bucketname }}" -kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" +kp_schema_base_path: "{{ cloud_storage_url }}/{{ cloud_storage_content_bucketname }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" -gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required +cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" 
+cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" +cloud_private_storage_namespace: "{{ cloud_public_storage_namespace }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_management_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" +cloud_management_storage_project: "{{ cloud_public_storage_project }}" +cloud_management_storage_namespace: "{{ cloud_public_storage_namespace }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" +cloud_artifact_storage_namespace: "{{ cloud_public_storage_namespace }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" -gcloud_private_bucket_projectId: "" +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index ef5db134da..1b62ad0a1f 100644 --- 
a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -1,11 +1,38 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # -# Azure storage account credentials - Note if you are using a single account, you can set the same key for the belows -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" + +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" 
+cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" core_vault_docker_registry_url: "change.docker.url" # for docker hub use https://index.docker.io/v1 core_vault_docker_registry_user: "change.docker.user" @@ -19,6 +46,6 @@ lp_vault_youtube_api_key: # youtube api token if you want # Sensible defaults which you need not change - But if you would like to change, you are free to do so lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: "" -gcp_storage_key_file: "" +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/common.yml b/private_repo/ansible/inventory/dev/UCI/common.yml new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/hosts b/private_repo/ansible/inventory/dev/UCI/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/secrets.yml 
b/private_repo/ansible/inventory/dev/UCI/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/common.yml b/private_repo/ansible/inventory/dev/managed-learn/common.yml new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/hosts b/private_repo/ansible/inventory/dev/managed-learn/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/secrets.yml b/private_repo/ansible/inventory/dev/managed-learn/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file From 664f931df9f36d273ce854cf85deff9e0dbdbcc4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 10 Jan 2023 21:12:22 +0530 Subject: [PATCH 175/203] fix: adding graylog related changes --- .../ansible/inventory/dev/Core/common.yml | 15 +++++++++++++++ private_repo/ansible/inventory/dev/Core/hosts | 7 +++++++ .../ansible/inventory/dev/DataPipeline/hosts | 6 ++++++ .../ansible/inventory/dev/KnowledgePlatform/hosts | 6 ++++++ 4 files changed, 34 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index bee6dc7028..a85b01c898 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -128,6 +128,8 @@ nginx_private_ingress_annotations: # 
------------------------------------------------------------------------------------------------------------ # +graylog_trusted_proxies: "1.2.3.4/21" # This should be the kubernetes nodes subnet CIDR range + ## Optional learningservice_ip: "10.0.1.5" # Load balancer IP or server ip @@ -394,3 +396,16 @@ grafana_login_whitelisted_emails: |- # Add below var to monitor report-cassandra server report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" + +# graylog +graylog_open_to_public: true +send_logs_to_graylog: true +graylog_root_timezone: "Asia/Kolkata" +graylog_elasticsearch_discovery_enabled: "true" +graylog_allow_leading_wildcard_searches: "true" +graylog_allow_highlighting: "true" +graylog_transport_email_enabled: "true" +graylog_transport_email_hostname: "{{ mail_server_host }}" +graylog_transport_email_auth_username: "apikey" +graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" +graylog_transport_email_use_ssl: "false" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Core/hosts b/private_repo/ansible/inventory/dev/Core/hosts index 58fb6eeb95..6bcd606290 100644 --- a/private_repo/ansible/inventory/dev/Core/hosts +++ b/private_repo/ansible/inventory/dev/Core/hosts @@ -9,6 +9,12 @@ ansible_ssh_private_key_file=/var/lib/jenkins/secrets/deployer_ssh_key [keycloak:children] keycloak-1 +[graylog-1] +10.0.1.9 mongodb_master=True graylog_is_master=True + +[graylog:children] +graylog-1 + [log-es-1] 10.0.1.9 es_instance_name=log-es-1 node_name=log-es-1 es_etc_node_master=true es_etc_node_data=true @@ -137,6 +143,7 @@ node-exporter [core:children] es +graylog log-es cassandra postgresql-master diff --git a/private_repo/ansible/inventory/dev/DataPipeline/hosts b/private_repo/ansible/inventory/dev/DataPipeline/hosts index 2ecf51e3bc..199392ce08 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/hosts +++ b/private_repo/ansible/inventory/dev/DataPipeline/hosts @@ -19,6 +19,12 @@ core-es-1 
[log-es:children] log-es-1 +[graylog-1] +10.0.1.9 + +[graylog:children] +graylog-1 + ################# KP ########################## [learning] 10.0.1.5 diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index c144bc6fa2..e66c3c6ab0 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -62,6 +62,12 @@ core-es-1 [log-es:children] log-es-1 +[graylog-1] +10.0.1.9 + +[graylog:children] +graylog-1 + [composite-search-cluster] 10.1.4.5 From a23919d698b1c5181fd1458651c664ba52b92a75 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 10 Jan 2023 21:26:55 +0530 Subject: [PATCH 176/203] Release 5.1.0 (#3697) * Add Lern inventory template * Install openjdk-11.0.2 in jenkins * Add Lern BB vars to template * Remove duplicate flink jobs * Update template vars * Update lern specific vars --- deploy/jenkins/jenkins-server-setup.sh | 7 +++ .../jobs/FlinkPipelineJobs/config.xml | 3 +- .../jobs/FlinkJobs/config.xml | 2 - .../dev/KnowledgePlatform/common.yml | 47 ++++++++++++++++++- .../inventory/dev/KnowledgePlatform/hosts | 30 ++++++++++++ .../dev/KnowledgePlatform/secrets.yml | 27 ++++++++++- .../ansible/inventory/dev/Lern/common.yml | 1 + private_repo/ansible/inventory/dev/Lern/hosts | 1 + .../ansible/inventory/dev/Lern/secrets.yml | 1 + 9 files changed, 113 insertions(+), 6 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/Lern/common.yml create mode 120000 private_repo/ansible/inventory/dev/Lern/hosts create mode 120000 private_repo/ansible/inventory/dev/Lern/secrets.yml diff --git a/deploy/jenkins/jenkins-server-setup.sh b/deploy/jenkins/jenkins-server-setup.sh index f2ef322969..670395335b 100755 --- a/deploy/jenkins/jenkins-server-setup.sh +++ b/deploy/jenkins/jenkins-server-setup.sh @@ -145,6 +145,13 @@ mv jdk-11 java-11-openjdk-amd64 cp -r 
java-11-openjdk-amd64 /usr/lib/jvm/ rm -rf java-11-openjdk-amd64 openjdk-11+28_linux-x64_bin.tar.gz +#Install openjdk-11.0.2 # needed for DP jobs +echo -e "\n\e[0;32m${bold}Installating openjdk 11.0.2${normal}" +wget https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz +tar -xf openjdk-11.0.2_linux-x64_bin.tar.gz +mv jdk-11.0.2 /usr/lib/jvm/ +rm openjdk-11.0.2_linux-x64_bin.tar.gz + #Install maven 3.6.3 echo -e "\n\e[0;32m${bold}Installating maven 3.6.3${normal}" wget https://downloads.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml index 8daf73245a..96881ee988 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml @@ -102,7 +102,6 @@ return """<b>This parameter is not used</b>""" 'de-normalization-primary', 'de-normalization-secondary', 'druid-validator', -'assessment-aggregator', 'content-cache-updater', 'user-cache-updater-v2', 'summary-denormalization', @@ -170,4 +169,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml index cb98de88c1..9a0134703e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml @@ -104,8 +104,6 @@ return """<b>This parameter is not used</b>""" 'asset-enrichment', 'audit-event-generator', 'audit-history-indexer', -'collection-cert-pre-processor', -'collection-certificate-generator', 'auto-creator-v2', 
'metrics-data-transformer', 'content-publish', diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index bede16cb5b..1702cc633b 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -162,4 +162,49 @@ cloud_artifact_storage_namespace: "{{ cloud_public_storage_namespace }}" cloud_storage_base_url: "{{ cloud_storage_url }}" cloudstorage_base_path: "{{ cloud_storage_url }}" valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' -cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" \ No newline at end of file +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" +cloud_storage_pathstyle_access: false + +### Lern BB - Adding Lern specific vars here. In future if we want to move it to seperate folder this can be used as the starting point + +# Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +is_multidc_enabled: false # Change this to true if you plan to use cassandra multi data center setup +#Assessment Aggregator Content Read API +content_read_api_host: "" # Your domain host ex: http://test.com +content_read_api_endpoint: "" # ex: api/content/v1/read/ + +# For sendgrid, if you want to change, update the following +sunbird_mail_server_host: "smtp.sendgrid.net" # Email host, can be any email provider +sunbird_mail_server_username: "apikey" # Email provider username; for sendgrid you can use "apikey" +sunbird_mail_server_port: ## Email server SMTP port ex: 587 +# This mail id should be verified by your provider. This is the mail id which will be used for `From Address`. For example, +# From: support@sunbird.org +# Subject: Forgot password +# Hi..... 
+sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should be as from address in mails + +# Optional variables - Can be left blank if you dont plan to use the intended features +# data exhaust alerts +data_exhaust_webhook_url: "slack.com" # Slack webhook url +data_exhaust_Channel: "slack.com" # Slack channel for data products alerts + +# This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, +# From: SBSMS +# Hi..... +# This is optional. +# If not set, you won't get sms OTPs. You'll get it in mail though. +sunbird_notification_msg_default_sender: # SMS from Address; exact 6 char like SBSUNB + + +# Sensible defaults which you need not change - But if you would like to change, you are free to do so +data_exhaust_name: "lern-datapipeline-monitoring" # Slack notification name +postgres: + db_url: "{{ groups['postgres'][0] }}" + db_username: analytics + db_name: analytics + db_table_name: "{{env}}_consumer_channel_mapping" + db_port: 5432 + db_admin_user: postgres + db_admin_password: "{{dp_vault_pgdb_admin_password}}" + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index c144bc6fa2..828e96c01e 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -106,3 +106,33 @@ lp [mlworkbench] 0.0.0.0 + +### Lern BB - Adding Lern specific vars here. 
In future if we want to move it to seperate folder this can be used as the starting point +[spark] +10.0.2.4 + +[learning] +10.0.2.7 + +[raw-coordinator] +10.0.2.7 + +[raw-overlord] +10.0.2.7 + +[raw-broker] +10.0.2.7 + +[postgres] +10.0.2.5 + +[report-cassandra:children] +core-cassandra + +[lp-cassandra] +10.0.2.5 + +[redis] +10.0.2.2 + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index 1b62ad0a1f..0a03bfdb43 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -47,5 +47,30 @@ lp_vault_youtube_api_key: # youtube api token if you want lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" # update if seperate object storage is used +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" -cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" + +### Lern BB - Adding Lern specific vars here. 
In future if we want to move it to seperate folder this can be used as the starting point + +# Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +core_vault_sunbird_api_auth_token: # copy value form variable core_vault_sunbird_api_auth_token from core/secrets.yml +dp_vault_pgdb_password: "change.postgres.password" # postgres password for analytics db +dp_vault_druid_postgress_pass: "change.postgres.password" # postgres password for druid db +dp_vault_pgdb_admin_password: "change.postgres.password" # postgres password for admin +core_vault_sunbird_encryption_key: "" # copy value from variable core_vault_sunbird_encryption_key from core secrets.yml + +# Optional variables - Can be left blank if you dont plan to use the intended features +core_vault_sunbird_fcm_account_key: "" # Firebase Cloud Messaging API Key +sunbird_msg_91_auth: "" # API key for sending OTP SMS +sunbird_mail_server_password: "" # Email server password +dp_vault_data_exhaust_token: "" # slack api token + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/Lern/common.yml b/private_repo/ansible/inventory/dev/Lern/common.yml new file mode 120000 index 0000000000..1168242b3a --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/common.yml @@ -0,0 +1 @@ +../KnowledgePlatform/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Lern/hosts b/private_repo/ansible/inventory/dev/Lern/hosts new file mode 120000 index 0000000000..d54fc1e61a --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/hosts @@ -0,0 +1 @@ +../KnowledgePlatform/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Lern/secrets.yml b/private_repo/ansible/inventory/dev/Lern/secrets.yml new file mode 120000 index 0000000000..1a0f3f3224 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/secrets.yml @@ -0,0 +1 @@ +../KnowledgePlatform/secrets.yml \ No newline at end of file From f94a83f3b240220414aab3c67c7b5e63e088ab08 Mon Sep 17 00:00:00 2001 
From: Keshav Prasad Date: Tue, 10 Jan 2023 23:52:59 +0530 Subject: [PATCH 177/203] feat: ED-699 adding data to enable opa Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 57 +++++++++++++++---- .../ansible/inventory/dev/Kubernetes/keys | 1 + .../inventory/dev/Sunbird-RC/common.yml | 1 + .../ansible/inventory/dev/Sunbird-RC/hosts | 1 + .../ansible/inventory/dev/Sunbird-RC/keys | 1 + .../inventory/dev/Sunbird-RC/secrets.yml | 1 + private_repo/ansible/inventory/dev/UCI/keys | 1 + .../ansible/inventory/dev/key-generate.sh | 21 +++++++ .../ansible/inventory/dev/managed-learn/keys | 1 + 9 files changed, 73 insertions(+), 12 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/Kubernetes/keys create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/common.yml create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/hosts create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/keys create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml create mode 120000 private_repo/ansible/inventory/dev/UCI/keys create mode 100755 private_repo/ansible/inventory/dev/key-generate.sh create mode 120000 private_repo/ansible/inventory/dev/managed-learn/keys diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index a85b01c898..f2d6925eaf 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -397,15 +397,48 @@ grafana_login_whitelisted_emails: |- # Add below var to monitor report-cassandra server report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" -# graylog -graylog_open_to_public: true -send_logs_to_graylog: true -graylog_root_timezone: "Asia/Kolkata" -graylog_elasticsearch_discovery_enabled: "true" -graylog_allow_leading_wildcard_searches: "true" -graylog_allow_highlighting: "true" -graylog_transport_email_enabled: "true" 
-graylog_transport_email_hostname: "{{ mail_server_host }}" -graylog_transport_email_auth_username: "apikey" -graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" -graylog_transport_email_use_ssl: "false" \ No newline at end of file +# graylog variables +graylog_open_to_public: true # allows you to access via domain/graylog +send_logs_to_graylog: true # agents starts sending logs to graylog instead of ES +graylog_root_timezone: "Asia/Kolkata" # timezone +graylog_elasticsearch_discovery_enabled: "true" # auto discover ES nodes and version +graylog_allow_leading_wildcard_searches: "true" # allows wild card searching +graylog_allow_highlighting: "true" # shows yellow highlights on matches +graylog_transport_email_enabled: "true" # enables emails to be sent via graylog +graylog_transport_email_hostname: "{{ mail_server_host }}" # email server host name +graylog_transport_email_auth_username: "apikey" # sendgrid / email service api key +graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" # from email address +graylog_transport_email_use_ssl: "false" # cannot use both tls and ssl, so disabling ssl as tls is enabled by default + +# Opa and Adminutils +# Prefixes will match the starting part of the files under keys dirctory in inventory +adminutil__device_keyprefix: "mobile_devicev2_key" # private key prefix for mobile apps +adminutil__device_keystart: 1 # starting number of the key file +adminutil__device_keycount: 10 # ending number of the key file +adminutil__access_keyprefix: "accessv1_key" # private key prefix for user access tokens +adminutil__access_keystart: 1 # starting number of the key file +adminutil__access_keycount: 10 # ending number of the key file +adminutil__desktop_keyprefix: "desktop_devicev2_key" # private key prefix for desktop apps +adminutil__desktop_keystart: 1 # starting number of the key file +adminutil__desktop_keycount: 10 # ending number of the key file +adminutil__portal_anonymous_keyprefix: 
"portal_anonymous_key" # private key prefix for portal anonymous sessions +adminutil__portal_anonymous_keystart: 1 # starting number of the key file +adminutil__portal_anonymous_keycount: 10 # ending number of the key file +adminutil__portal_loggedin_keyprefix: "portal_loggedin_key" # private key prefix for portal loggedin sessions +adminutil__portal_loggedin_keystart: 1 # starting number of the key file +adminutil__portal_loggedin_keycount: 10 # ending number of the key file +adminutil_embed_role: 'true' # embeds user roles in access tokens + +# Kong and Adminutils +# Consumer names will match the starting part of the files under keys dirctory in inventory +kong_mobile_v2_consumer: "mobile_devicev2" # kong consumer name for mobile apps +kong_desktop_v2_consumer: "desktop_devicev2" # kong consumer name for desktop apps +kong_portal_anonymous_consumer: "portal_anonymous" # kong consumer name for portal anonymous sessions +kong_portal_loggedin_consumer: "portal_loggedin" # kong consumer name for portal loggedin sessions +kong_desktop_device_consumer_names_for_opa: '["desktop_devicev2", "desktop_device"]' # ops checks will be skipped for desktop consumers + +# Portal sessions +sunbird_kong_device_register: 'true' # enables refersh token api call after login +sunbird_kong_device_register_anonymous: 'true' # enabled anonymous sessions +sunbird_session_store_type: redis # uses redis for session data instead of cassandra +portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Kubernetes/keys b/private_repo/ansible/inventory/dev/Kubernetes/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Kubernetes/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml b/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml 
new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/hosts b/private_repo/ansible/inventory/dev/Sunbird-RC/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/keys b/private_repo/ansible/inventory/dev/Sunbird-RC/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml b/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/keys b/private_repo/ansible/inventory/dev/UCI/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/key-generate.sh b/private_repo/ansible/inventory/dev/key-generate.sh new file mode 100755 index 0000000000..2bf82230b0 --- /dev/null +++ b/private_repo/ansible/inventory/dev/key-generate.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -euo pipefail +read -s -p 'Enter the ansible vault password (redacted): ' vault_pass +echo +read -s -p 'Re-enter the ansible vault password (redacted): ' confirm_vault_pass +echo +if [[ $vault_pass == $confirm_vault_pass ]] +then + echo "$vault_pass" > temp_vault_pass + cd Core/keys + for i in {1..10}; do openssl genrsa -out mobile_devicev2_c$i 
2048 && openssl pkcs8 -topk8 -inform PEM -in mobile_devicev2_c$i -out mobile_devicev2_key$i -nocrypt && rm -rf mobile_devicev2_c$i ; done + for i in {1..10}; do openssl genrsa -out accessv1_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in accessv1_c$i -out accessv1_key$i -nocrypt && rm -rf accessv1_c$i ; done + for i in {1..10}; do openssl genrsa -out desktop_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in desktop_devicev2_c$i -out desktop_devicev2_key$i -nocrypt && rm -rf desktop_devicev2_c$i ; done + for i in {1..10}; do openssl genrsa -out portal_anonymous_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in portal_anonymous_c$i -out portal_anonymous_key$i -nocrypt && rm -rf portal_anonymous_c$i ; done + for i in {1..10}; do openssl genrsa -out portal_loggedin_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in portal_loggedin_c$i -out portal_loggedin_key$i -nocrypt && rm -rf portal_loggedin_c$i ; done + while read -r line; do ansible-vault encrypt $line --vault-password-file ../../temp_vault_pass; done <<< $(ls) + cd ../.. 
&& rm temp_vault_pass + echo "OK" +else + echo "Vault passwords dont match" +fi \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/keys b/private_repo/ansible/inventory/dev/managed-learn/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file From c9297de22071f6ae568bae8cb69e6ee8b15539b4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 11 Jan 2023 00:14:51 +0530 Subject: [PATCH 178/203] fear: adding required consumers for sessions Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 41 ++++++++++++++++--- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index f2d6925eaf..3397552d63 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -241,11 +241,9 @@ cloudstorage_base_path: "{{ cloud_storage_url }}" valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" - - # Provide the admin-api consumer access to all API's - The public repo restricts this for security reasons -# If you dont want to key to have access to all API's, please remove the variables kong_all_consumer_groups and kong_consumers or edit the groups to have a smaller subset -kong_all_consumer_groups: +# If you dont want the admin api key to have access to all API's, please remove the variables "all_apis_access_group" and "kong_consumers" or edit the groups to have a smaller subset +all_apis_access_group: - announcementAccess - anonymousAppAccess - anonymousCertificateAccess @@ -367,6 +365,37 @@ kong_all_consumer_groups: kong_consumers: - username: api-admin + groups: "{{ all_apis_access_group }}" + state: present + - username: mobile_admin + groups: "{{ 
mobile_admin_groups }}" + print_credentials: true + state: present + - username: mobile_app + groups: "{{ mobile_app_groups }}" + state: present + - username: mobile_device + groups: "{{ mobile_device_groups }}" + state: present + - username: mobile_devicev2 + groups: "{{ mobile_device_groups }}" + state: present + - username: portal_anonymous_register + groups: "{{ portal_anonymous_register }}" + state: present + - username: portal_loggedin_register + groups: "{{ portal_loggedin_register }}" + state: present + - username: portal_anonymous + groups: "{{ anonymous_user_groups }}" + state: present + - username: portal_loggedin + groups: "{{ kong_all_consumer_groups }}" + state: present + - username: portal_anonymous_fallback_token + groups: "{{ anonymous_user_groups }}" + state: present + - username: portal_loggedin_fallback_token groups: "{{ kong_all_consumer_groups }}" state: present @@ -439,6 +468,6 @@ kong_desktop_device_consumer_names_for_opa: '["desktop_devicev2", "desktop_devic # Portal sessions sunbird_kong_device_register: 'true' # enables refersh token api call after login -sunbird_kong_device_register_anonymous: 'true' # enabled anonymous sessions +sunbird_kong_device_register_anonymous: 'true' # enables anonymous sessions sunbird_session_store_type: redis # uses redis for session data instead of cassandra -portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 \ No newline at end of file +portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 to store session data \ No newline at end of file From c4ac3713c5e12a58dee937c8d6c7a86ab1dc1c30 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 11 Jan 2023 00:29:16 +0530 Subject: [PATCH 179/203] fix: adding additional set of vars for sessions and graylog Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 2 +- .../ansible/inventory/dev/Core/secrets.yml | 18 ++++++++++++++++-- 
.../inventory/dev/DataPipeline/common.yml | 2 ++ .../inventory/dev/KnowledgePlatform/common.yml | 3 ++- 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 3397552d63..d174f6ea24 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -428,7 +428,7 @@ report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" # graylog variables graylog_open_to_public: true # allows you to access via domain/graylog -send_logs_to_graylog: true # agents starts sending logs to graylog instead of ES +send_logs_to_graylog: true # filebeat agents will send logs to graylog instead of ES graylog_root_timezone: "Asia/Kolkata" # timezone graylog_elasticsearch_discovery_enabled: "true" # auto discover ES nodes and version graylog_allow_leading_wildcard_searches: "true" # allows wild card searching diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 9b8f0f43a5..68261f8ec5 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -117,7 +117,7 @@ mongodb_keyfile_content: | # 4. Again vist Admin Panel post restart -> Click on Plugins => Write API # 5. Under MASTER TOKENS section, click on create token button, It will generate a token. 
discussionsmw_nodebb_authorization_token: # Read the comment above to generate this key -core_vault_mail_server_password: "" # Email server password +core_vault_mail_server_password: "" # Email server password / api token # Oauth keys core_vault_sunbird_google_oauth_clientId_portal: # Google oauth client id @@ -126,6 +126,17 @@ core_vault_sunbird_google_captcha_site_key_portal: # Google recaptch site google_captcha_private_key: # Google recaptch private key learning_content_drive_apiKey: # Google drive api key +### Graylog ### +graylog_password_secret: "" # Random secret. Generate using the command: pwgen -s 96 1 +graylog_root_password_sha2: "" # Random secret. Generate using the command: echo -n "Enter Password: " && head -1 Date: Wed, 11 Jan 2023 10:28:05 +0530 Subject: [PATCH 180/203] LR-278 added lern specific variables --- .../roles/stack-sunbird/templates/sunbird_lms-service.env | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 946bf3af10..f1c421a603 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -146,4 +146,7 @@ sunbird_cloud_service_provider={{cloud_service_provider}} isMultiDCEnabled={{cassandra_multi_dc_enabled}} # Release-5.0.1 -cloud_storage_base_url={{cloud_storage_base_url}} \ No newline at end of file +cloud_storage_base_url={{cloud_storage_base_url}} +cloud_storage_cname_url={{ cloud_storage_cname_url | default('') }} +cloud_storage_dial_bucketname={{ cloud_storage_dial_bucketname | default('dial') }} +cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} \ No newline at end of file From 08d628c20729a091ef8da5da6b33d1a2dc19bba9 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 11 Jan 2023 15:22:24 +0530 Subject: [PATCH 181/203] LR-278 added 
placeholder as configurable value --- ansible/roles/stack-sunbird/defaults/main.yml | 5 ++++- .../roles/stack-sunbird/templates/sunbird_lms-service.env | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 036fda51bd..bcb4581212 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1062,4 +1062,7 @@ inquiry_assessment_publish_kafka_topic_name: "{{ env_name }}.assessment.publish. inquiry_cassandra_connection: "{{ lp_cassandra_connection }}" inquiry_cassandra_keyspace_prefix: "{{ lp_cassandra_keyspace_prefix }}" inquiry_redis_host: "{{ sunbird_lp_redis_host }}" -inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" \ No newline at end of file +inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" + +### LERN Release-5.0.1 +cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index f1c421a603..0d5131b418 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -149,4 +149,5 @@ isMultiDCEnabled={{cassandra_multi_dc_enabled}} cloud_storage_base_url={{cloud_storage_base_url}} cloud_storage_cname_url={{ cloud_storage_cname_url | default('') }} cloud_storage_dial_bucketname={{ cloud_storage_dial_bucketname | default('dial') }} -cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} \ No newline at end of file +cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} +cloud_store_base_path_placeholder={{ cloud_store_base_path_placeholder | default('CLOUD_BASE_PATH') }} \ No 
newline at end of file From 3937d5abe8465c6f14428463211771a1c243e591 Mon Sep 17 00:00:00 2001 From: kumarks1122 Date: Thu, 12 Jan 2023 19:44:17 +0530 Subject: [PATCH 182/203] #000 | LERN and ED Dataproducts jenkins changes added --- .../jobs/AnalyticsReplayJobs/config.xml | 14 +------------- .../Lern/jobs/LernAnalyticsReplayJobs/config.xml | 15 --------------- .../jobs/Lern/jobs/LernDataProducts/config.xml | 2 +- 3 files changed, 2 insertions(+), 29 deletions(-) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml index 95b1f8c7e4..b65594e17b 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml @@ -48,12 +48,8 @@ <font color=dimgray size=2><b>Specify the job id.</b></font> - collection-summary-report-v2 - collection-summary-report program-collection-summary-report audit-metrics-report - admin-user-reports - admin-geo-reports district-weekly district-monthly desktop-consumption-report @@ -61,18 +57,10 @@ content-rating-updater druid-query-processor monitor-job-summ - course-enrollment-report textbook-progress-report etb-metrics daily-metrics - progress-exhaust - userinfo-exhaust - response-exhaust - progress-exhaust-v2 - response-exhaust-v2 - course-batch-status-updater - cassandra-migration - druid-dataset + druid-dataset uci-response-exhaust uci-private-exhaust diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml index cd2ac8b2d1..6567620e9c 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml @@ -50,32 +50,17 @@ 
collection-summary-report-v2 collection-summary-report - program-collection-summary-report - audit-metrics-report admin-user-reports admin-geo-reports - district-weekly - district-monthly - desktop-consumption-report - wfs - content-rating-updater - druid-query-processor - monitor-job-summ cassandra-migration course-enrollment-report - textbook-progress-report - etb-metrics - daily-metrics progress-exhaust userinfo-exhaust response-exhaust progress-exhaust-v2 response-exhaust-v2 course-batch-status-updater - druid-dataset score-metric-migration-job - uci-response-exhaust - uci-private-exhaust diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml index 6f8fd449c2..dba542fd06 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml @@ -230,7 +230,7 @@ return """<b>This parameter is not used</b>""" - pipelines/deploy/ed-dataproducts/Jenkinsfile + pipelines/deploy/lern-dataproducts/Jenkinsfile false From 1ed421fa280edb0921ec14335c9e200bd932a805 Mon Sep 17 00:00:00 2001 From: kumarks1122 Date: Thu, 12 Jan 2023 19:46:28 +0530 Subject: [PATCH 183/203] #000 | LERN and ED Dataproducts jenkins changes added --- .../Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml index dba542fd06..4fa8dea65d 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml @@ -169,7 +169,7 @@ return """<b>This parameter is not used</b>""" module <font color=dimgray size=2><b>It will deploy only 
lpa_core_dp_artifacts(batch-models & job-manager) jar.</b></font> - ed-dataproducts + lern-dataproducts false From f6227f351191811e98b1e311ba8734108fb63898 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 13 Jan 2023 13:38:48 +0530 Subject: [PATCH 184/203] fix: adding adminutil_learner_api_key consumer --- private_repo/ansible/inventory/dev/Core/common.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index d174f6ea24..6010476336 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -398,6 +398,10 @@ kong_consumers: - username: portal_loggedin_fallback_token groups: "{{ kong_all_consumer_groups }}" state: present + - username: adminutil_learner_api_key + groups: "{{ userAccess }}" + state: present + ## Grafana oauth From eb77fcfac79429dc50f392aa1d978f18514ef7bd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 13 Jan 2023 13:44:27 +0530 Subject: [PATCH 185/203] fix: adding adminutls to learner api token Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/secrets.yml | 3 +++ private_repo/ansible/inventory/dev/key-generate.sh | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 68261f8ec5..c57a8bd37c 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -137,6 +137,9 @@ sunbird_loggedin_register_token: # Use portal_loggedin_register consumer tok sunbird_anonymous_default_token: # Use portal_anonymous_fallback_token consumer token sunbird_logged_default_token: # Use portal_loggedin_fallback_token consumer token +# adminutils to learner api token +adminutil_learner_api_auth_key: # Use adminutil_learner_api_key consumer token + # 
------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features core_vault_monitor_alerts_slack_url: "" # Slack webhook for alerts from alertmanager diff --git a/private_repo/ansible/inventory/dev/key-generate.sh b/private_repo/ansible/inventory/dev/key-generate.sh index 2bf82230b0..e0514c6dd6 100755 --- a/private_repo/ansible/inventory/dev/key-generate.sh +++ b/private_repo/ansible/inventory/dev/key-generate.sh @@ -7,7 +7,7 @@ echo if [[ $vault_pass == $confirm_vault_pass ]] then echo "$vault_pass" > temp_vault_pass - cd Core/keys + mkdir -p Core/keys && cd Core/keys for i in {1..10}; do openssl genrsa -out mobile_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in mobile_devicev2_c$i -out mobile_devicev2_key$i -nocrypt && rm -rf mobile_devicev2_c$i ; done for i in {1..10}; do openssl genrsa -out accessv1_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in accessv1_c$i -out accessv1_key$i -nocrypt && rm -rf accessv1_c$i ; done for i in {1..10}; do openssl genrsa -out desktop_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in desktop_devicev2_c$i -out desktop_devicev2_key$i -nocrypt && rm -rf desktop_devicev2_c$i ; done From 6ab68620854580256361bb9fb7b8e703656e35b4 Mon Sep 17 00:00:00 2001 From: G33tha Date: Tue, 17 Jan 2023 16:25:48 +0530 Subject: [PATCH 186/203] Update youtube api key for content service (#3714) --- ansible/roles/stack-sunbird/defaults/main.yml | 6 +++++- .../templates/content-service_application.conf | 6 ++++++ private_repo/ansible/inventory/dev/Core/secrets.yml | 6 +++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 620ec5f4ad..7fbeb17d70 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1065,4 +1065,8 @@ 
inquiry_redis_host: "{{ sunbird_lp_redis_host }}" inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" ### LERN Release-5.0.1 -cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" \ No newline at end of file +cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" + +#Youtube Standard Licence Validation +youtube_app_name: fetch-youtube-license +youtube_api_key: "{{ lp_vault_youtube_api_key }}" diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index bb44a71828..837298ac30 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -490,6 +490,12 @@ cloud_storage_container: "{{ cloud_storage_content_bucketname }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" +#Youtube Standard Licence Validation +learning.content.youtube.application.name="{{ youtube_app_name }}" +learning_content_youtube_apikey="{{ youtube_api_key }}" +youtube.license.regex.pattern=["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)","^([A-Za-z0-9\\-\\_]*)"] +learning.valid_license=["creativeCommon"] + kafka { urls : "{{ kafka_urls }}" topic.send.enable : true diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index c57a8bd37c..8bf07e91cd 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -200,4 +200,8 @@ cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" # Graylog -graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # email server password / api token \ No newline at end of file +graylog_transport_email_auth_password: "{{ core_vault_mail_server_password 
}}" # email server password / api token + +# ------------------------------------------------------------------------------------------------------------ # +# Optional variables - Can be left blank if you dont plan to use the intended features +lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site From 07dad19c7ef8585afe9bdefa23827558cf21f21f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 30 Jan 2023 11:10:13 +0530 Subject: [PATCH 187/203] fix: ED-1164 adding missing UCI job Signed-off-by: Keshav Prasad --- .../UCI/jobs/uci-transport-socket/config.xml | 108 ++++++++++++ .../UCI/jobs/uci-transport-socket/config.xml | 77 +++++++++ .../UCI/jobs/uci-transport-socket/config.xml | 159 ++++++++++++++++++ 3 files changed, 344 insertions(+) create mode 100644 deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml create mode 100644 deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..afba1c3fa0 --- /dev/null +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,108 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 + -1 + 1 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! 
The metadata.json will be copied from this job.</b></font> + Build/UCI/uci-transport-socket + false + + + image_tag + <font color=darkgreen size=2><b>OPTIONAL: Specify the tag to upload a specific image version to the container registry.</b></font> + + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Push the docker image to container registry. +</b></font> + + + ArtifactRepo + + + + + + + 0 + 0 + + false + project + false + + + + + + + + Build/UCI/uci-transport-socket + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${public_repo_branch} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/docker/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml b/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..c63ac79a73 --- /dev/null +++ b/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,77 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 + -1 + 1 + + + + + false + false + + + + + github_release_tag + <font style="color:dimgray;font-size:14px;"><b> +<li>To build from a tag, use refs/tags/github_tag</li> +<li>To build from a branch, use refs/heads/github_branch</li> +<li>The default value of ${public_repo_branch} will be the release / tag version set in global configuration</li> +<li>To build from a differnt branch, replace the ${public_repo_branch} with your branch</li> +</b></font> + refs/heads/${public_repo_branch} + true + + + + + 0 + 0 + + false + project + false + + + + + + + + + + 2 + + + https://github.com/samagra-comms/transport-socket.git + + + + + master + + + false + + + + build/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml 
b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..b418c503df --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,159 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/UCI/uci-transport-socket + false + + + image_tag + <font color=red size=2><b>CAUTION: If the value is blank, image tag will be taken from the latest metadata.json.</b></font> + + false + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + uci-transport-socket + Deploy/dev/UCI/uci-transport-socket + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + uci-transport-socket + Deploy/dev/UCI/uci-transport-socket + + + ET_FORMATTED_HTML + true + + + role_name + + + + sunbird-deploy + + + + + + + 0 + 0 + + false + project + false + + + + + + + + ArtifactUpload/dev/UCI/uci-transport-socket + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + uci-transport-socket + + + false + + + + true + false + + 0 + false + + + + kubernetes/pipelines/deploy_core/Jenkinsfile + false + + + false + From 0fd5d9a4da250ce4ac5eafad8a2aecc823c28a0d Mon Sep 17 00:00:00 2001 From: Kenneth Heung Date: Mon, 30 Jan 2023 16:05:06 +0800 Subject: [PATCH 188/203] adding oci-cli in bootstrap and Jenkins job template (#3715) --- ansible/bootstrap.yml | 13 ++++++++-- ansible/roles/oci-cli/defaults/main.yml | 1 + ansible/roles/oci-cli/tasks/main.yml | 24 +++++++++++++++++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 5 ++-- 4 files changed, 39 insertions(+), 4 
deletions(-) create mode 100644 ansible/roles/oci-cli/defaults/main.yml create mode 100644 ansible/roles/oci-cli/tasks/main.yml diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index b23479e833..e77a1cb038 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -48,7 +48,17 @@ roles: - role: aws-cli tags: - - aws_cli + - aws_cli + +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: + - role: oci-cli + tags: + - oci_cli - hosts: "{{ hosts| default('all') }}" become: yes @@ -60,4 +70,3 @@ - vm-agents-nodeexporter tags: - node_exporter - diff --git a/ansible/roles/oci-cli/defaults/main.yml b/ansible/roles/oci-cli/defaults/main.yml new file mode 100644 index 0000000000..00a8940a29 --- /dev/null +++ b/ansible/roles/oci-cli/defaults/main.yml @@ -0,0 +1 @@ +oci_cli_url: https://github.com/oracle/oci-cli/releases/download/v3.22.0/oci-cli-3.22.0-Ubuntu-18.04-Offline.zip diff --git a/ansible/roles/oci-cli/tasks/main.yml b/ansible/roles/oci-cli/tasks/main.yml new file mode 100644 index 0000000000..8f21263672 --- /dev/null +++ b/ansible/roles/oci-cli/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Download the installation file + get_url: + url: "{{ oci_cli_url }}" + dest: /tmp/ocicli.zip + +- name: Installing unzip + apt: + name: "{{item}}" + state: latest + with_items: + - zip + - unzip + +- name: Unzip the installer + unarchive: + src: /tmp/ocicli.zip + dest: /tmp/ + remote_src: yes + +- name: install oci cli + shell: ./oci-cli-installation/install.sh --install-dir {{ ansible_env.HOME }} --exec-dir {{ ansible_env.HOME }} --script-dir {{ ansible_env.HOME }} --accept-all-defaults + args: + chdir: /tmp/ diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index b95bca2645..e11b5b5843 100644 --- 
a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -96,6 +96,7 @@ return """<b>This parameter is not used</b>""" 'azure_cli', 'aws_cli', 'gcloud_cli', +'oci_cli' 'all'] true @@ -123,7 +124,7 @@ return """<b>This parameter is not used</b>""" false - + @@ -155,4 +156,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + From 612fbfa8fc4f5e8c6080e236215b136fa71d2035 Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Fri, 3 Feb 2023 20:19:20 +0530 Subject: [PATCH 189/203] ED-1173 OfflineInstaller deploy job fix (#3721) * updated electronuserland/builder tag form wine to 16-wine * added npm update command for electronuserland/builder:16-wine --- ansible/roles/desktop-deploy/templates/build.sh.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/desktop-deploy/templates/build.sh.j2 b/ansible/roles/desktop-deploy/templates/build.sh.j2 index 2f431915e8..1fd121f219 100644 --- a/ansible/roles/desktop-deploy/templates/build.sh.j2 +++ b/ansible/roles/desktop-deploy/templates/build.sh.j2 @@ -4,8 +4,9 @@ set -eo pipefail cd {{offline_repo_location}}/ # Run the docker image and run the OS Specific build along with environment specific build -docker run -d --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" --name offline_deploy -w /project electronuserland/builder:wine sleep infinity +docker run -d --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" --name offline_deploy -w /project electronuserland/builder:16-wine sleep infinity docker cp . 
offline_deploy:/project/ +docker exec offline_deploy npm install -g npm@9.4.1 docker exec offline_deploy bash -x /project/setupOfflineInstaller.sh # Copy the built artifacts From bef49da693058119696970271639ea09d09f7a76 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 7 Feb 2023 15:37:47 +0530 Subject: [PATCH 190/203] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 27da8be26b..70fe0ff018 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -150,7 +150,7 @@ survey_streaming_success = {{ ml_analytics_survey_log_folder_path }}/success.log survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log -{% if ML_Cloud_Service_Provider is eq 'ORACLE' %} +{% if ML_Cloud_Service_Provider is equalto 'ORACLE' %} [ORACLE] @@ -164,7 +164,7 @@ region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} -{% elif ML_Cloud_Service_Provider is eq 'gcloud' %} +{% elif ML_Cloud_Service_Provider is equalto 'gcloud' %} [GCP] @@ -172,7 +172,7 @@ secret_data = {{ ml_Cloud_secret_json_file }} bucket_name = {{ cloud_storage_telemetry_bucketname }} -{% elif ML_Cloud_Service_Provider is eq 'aws' %} +{% elif ML_Cloud_Service_Provider is equalto 'aws' %} [AWS] From c603f1e24692b5727d1c847d7f9f74f8c199ebbc Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:01:57 +0530 Subject: [PATCH 191/203] ED-700: Update Monitoring Stack Helm Chart (#3723) * Update CRDs Api version and definitions Remove depricated webhook crd-install * Let helm takecare of creating CRDs * Update admission webhook and rbac API version * Update admissionwebhook patch repo * Update apiVersions --- 
.../charts/grafana/templates/role.yaml | 2 +- .../charts/grafana/templates/rolebinding.yaml | 2 +- .../templates/clusterrole.yaml | 2 +- .../templates/clusterrolebinding.yaml | 2 +- .../crds/crd-alertmanager.yaml | 8522 ++++++------ .../crds/crd-podmonitor.yaml | 449 +- .../crds/crd-prometheus.yaml | 10950 ++++++++-------- .../crds/crd-prometheusrules.yaml | 143 +- .../crds/crd-servicemonitor.yaml | 831 +- .../crds/crd-thanosrulers.yaml | 8903 +++++++------ .../mutatingWebhookConfiguration.yaml | 6 +- .../validatingWebhookConfiguration.yaml | 6 +- .../templates/prometheus-operator/crds.yaml | 6 - .../prometheus-operator/values.yaml | 6 +- .../templates/role.yaml | 2 +- .../templates/rolebinding.yaml | 2 +- 16 files changed, 14914 insertions(+), 14920 deletions(-) delete mode 100755 kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml index c95c1d0424..6a673b7b7d 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ template "grafana.fullname" . 
}} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml index c42229bf92..74ec303061 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "grafana.fullname" . }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml index 319aec16c2..a9198b823d 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml index 4635985aa0..160db8bd18 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding 
metadata: labels: diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml index cbf9fc27f9..2609b2f09b 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml @@ -1,25 +1,12 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: alertmanagers.monitoring.coreos.com spec: - additionalPrinterColumns: - - JSONPath: .spec.version - description: The version of Alertmanager - name: Version - type: string - - JSONPath: .spec.replicas - description: The desired replicas number of Alertmanagers - name: Replicas - type: integer - - JSONPath: .metadata.creationTimestamp - name: Age - type: date group: monitoring.coreos.com names: kind: Alertmanager @@ -28,4474 +15,4485 @@ spec: singular: alertmanager preserveUnknownFields: false scope: Namespaced - subresources: {} - validation: - openAPIV3Schema: - description: Alertmanager describes an Alertmanager cluster. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the Alertmanager - cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - additionalPeers: - description: AdditionalPeers allows injecting a set of additional Alertmanagers - to peer with to form a highly available cluster. - items: - type: string - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional Alertmanagers + to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. 
+ items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. 
- items: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array required: - - preference - - weight + - nodeSelectorTerms type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. 
due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. 
- type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. 
- items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. 
+ type: object type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + baseImage: + description: Base image that is used to deploy pods, without tag. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Alertmanager object, which shall be mounted into the Alertmanager + Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + items: + type: string + type: array + configSecret: + description: ConfigSecret is the name of a Kubernetes Secret in the + same namespace as the Alertmanager object, which contains configuration + for this Alertmanager instance. Defaults to 'alertmanager-' + The secret is mounted into /etc/alertmanager/config. + type: string + containers: + description: Containers allows injecting additional containers. This + is meant to allow adding an authentication proxy to an Alertmanager + pod. + items: + description: A single application container that you want to run within + a pod. properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. 
items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + description: EnvVar represents an environment variable present + in a Container. properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. 
This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. 
If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer required: - - podAffinityTerm - - weight + - name type: object type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. 
+ configMapRef: + description: The ConfigMap to select from properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - required: - - topologyKey + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object type: array - type: object - type: object - baseImage: - description: Base image that is used to deploy pods, without tag. - type: string - configMaps: - description: ConfigMaps is a list of ConfigMaps in the same namespace - as the Alertmanager object, which shall be mounted into the Alertmanager - Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - items: - type: string - type: array - configSecret: - description: ConfigSecret is the name of a Kubernetes Secret in the - same namespace as the Alertmanager object, which contains configuration - for this Alertmanager instance. Defaults to 'alertmanager-' - The secret is mounted into /etc/alertmanager/config. 
- type: string - containers: - description: Containers allows injecting additional containers. This - is meant to allow adding an authentication proxy to an Alertmanager - pod. - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' 
+ tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. 
TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. 
properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
+ type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. 
Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. 
Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' 
+ type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. 
+ name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string required: - - port + - devicePath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. 
More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. 
If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - properties: - level: - description: Level is SELinux level label that applies - to the container. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - role: - description: Role is a SELinux role label that applies - to the container. + name: + description: This must match the Name of a Volume. type: string - type: - description: Type is a SELinux type label that applies - to the container. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - user: - description: User is a SELinux user label that applies - to the container. + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string + required: + - mountPath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. 
If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Alertmanager is not served from root of a DNS name. + type: string + image: + description: Image if specified has precedence over baseImage, tag and + sha combinations. Specifying the version is still necessary to ensure + the Prometheus Operator knows what version of Alertmanager is being + configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling prometheus and alertmanager images from registries + see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Alertmanager configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. 
properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. 
- items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. 
- format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. 
- type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. 
- items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. 
- type: string - required: - - name - type: object - type: array - externalUrl: - description: The external URL the Alertmanager instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Alertmanager is not served from root of a DNS name. - type: string - image: - description: Image if specified has precedence over baseImage, tag and - sha combinations. Specifying the version is still necessary to ensure - the Prometheus Operator knows what version of Alertmanager is being - configured. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling prometheus and alertmanager images from registries - see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the Alertmanager configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. 
The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. 
- items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". 
- type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. 
To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. 
One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. 
More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. 
TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. 
Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. 
Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. 
Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. 
+ httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. 
- items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - description: 'Requests describes the minimum amount of compute - resources required. 
If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + role: + description: Role is a SELinux role label that applies + to the container. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. 
- type: string - type: - description: Type is a SELinux type label that applies - to the container. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. 
This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. - type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. 
When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + name: + description: This must match the Name of a Volume. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. 
TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - mountPath + - name type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, + so that it does not bind against the Pod IP. Note this is only for + the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. 
+ type: object + paused: + description: If set to true all actions on the underlaying managed objects + are not goint to be performed, except for delete actions. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the alertmanager pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. 
- Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + portName: + description: Port name used for the pods and governing service. This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. + The controller will eventually make the size of the running cluster + equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute resources + required. 
If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Alertmanager shall retain data for. Default + is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` + (milliseconds seconds minutes hours). + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers for. + This is useful, if using ExternalURL and a proxy is rewriting HTTP + routes of a request, and the actual ExternalURL is still true, but + the server serves requests under a different route prefix. For example + for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the + Alertmanager object, which shall be mounted into the Alertmanager + Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." + format: int64 + type: integer + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to the + container. + type: string + role: + description: Role is a SELinux role label that applies to the + container. + type: string + type: + description: Type is a SELinux type label that applies to the + container. + type: string + user: + description: User is a SELinux user label that applies to the + container. 
+ type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object + format: int64 + type: integer type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. items: - description: VolumeMount describes a mounting of a Volume within - a container. + description: Sysctl defines a kernel parameter to be set properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + description: Name of a property to set type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. 
Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + value: + description: Value of a property to set type: string required: - - mountPath - name + - value type: object type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. 
+ type: string + type: object type: object - type: array - listenLocal: - description: ListenLocal makes the Alertmanager server listen on loopback, - so that it does not bind against the Pod IP. Note this is only for - the Alertmanager UI, not the gossip communication. - type: boolean - logFormat: - description: Log format for Alertmanager to be configured with. - type: string - logLevel: - description: Log level for Alertmanager to be configured with. - type: string - nodeSelector: - additionalProperties: + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Prometheus Pods. type: string - description: Define which Nodes the Pods are scheduled on. - type: object - paused: - description: If set to true all actions on the underlaying managed objects - are not goint to be performed, except for delete actions. - type: boolean - podMetadata: - description: PodMetadata configures Labels and Annotations which are - propagated to the alertmanager pods. - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - portName: - description: Port name used for the pods and governing service. 
This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - replicas: - description: Size is the expected size of the alertmanager cluster. - The controller will eventually make the size of the running cluster - equal to the expected size. - format: int32 - type: integer - resources: - description: Define resources requests and limits for single Pods. - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration Alertmanager shall retain data for. Default - is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` - (milliseconds seconds minutes hours). - type: string - routePrefix: - description: The route prefix Alertmanager registers HTTP handlers for. - This is useful, if using ExternalURL and a proxy is rewriting HTTP - routes of a request, and the actual ExternalURL is still true, but - the server serves requests under a different route prefix. For example - for use with `kubectl proxy`. - type: string - secrets: - description: Secrets is a list of Secrets in the same namespace as the - Alertmanager object, which shall be mounted into the Alertmanager - Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - items: + sha: + description: SHA of Alertmanager container image to be deployed. Defaults + to the value of `version`. 
Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. type: string - type: array - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. - properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. 
- format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. - type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. - items: - description: Sysctl defines a kernel parameter to be set + storage: + description: Storage is the definition of how storage will be used by + the Alertmanager instances. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - name: - description: Name of a property to set + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - value: - description: Value of a property to set + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - required: - - name - - value type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. 
- type: string - type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Prometheus Pods. - type: string - sha: - description: SHA of Alertmanager container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - storage: - description: Storage is the definition of how storage will be used by - the Alertmanager instances. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + kind: + description: Kind is the type of resource being referenced type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: + name: + description: Name is the name of resource being referenced type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + type: string + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. 
+ format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: Tag of Alertmanager container image to be deployed. Defaults + to the value of `version`. Version is ignored if Tag is set. + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the alertmanager container, that + are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a + container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When not + set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false + or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's + volume should be mounted. 
Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. 
This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. items: - description: PersistentVolumeClaimCondition contails details - about state of pvc + description: Maps a string key to a path within a volume. properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: + key: + description: The key to project. type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' 
+ format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. type: string required: - - status - - type + - key + - path type: object type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean type: object - type: object - type: object - tag: - description: Tag of Alertmanager container image to be deployed. Defaults - to the value of `version`. Version is ignored if Tag is set. - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. 
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - version: - description: Version the cluster should be on. - type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts - on the output StatefulSet definition. VolumeMounts specified will - be appended to other VolumeMounts in the alertmanager container, that - are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a - container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When not - set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false - or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's - volume should be mounted. 
Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded using the container's - environment. Defaults to "" (volume's root). SubPathExpr and - SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' - type: string - diskName: - description: The Name of the data disk in the blob storage - type: string - diskURI: - description: The URI the data disk in the blob storage - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. 
- type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key - type: string - shareName: - description: Share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). 
- type: boolean - volumeAttributes: - additionalProperties: - type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 encoded. 
The - first item of the relative path must not start with - ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. 
- properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. 
This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. 
The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. - type: string - options: - additionalProperties: + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. 
Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. 
If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this + volume. type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). 
- type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. 
"ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: list of volume projections - items: - description: Projection that may be projected along with - other supported volume types + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' 
properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. 
The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. 
+ type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: items: - description: Maps a string key to a path within - a volume. 
- properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. 
This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. 
The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. 
- type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. 
- type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. 
If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. 
+ As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. 
Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. 
This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: - key: - description: The key to project. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. 
May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. - type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. 
- type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - type: object - status: - description: 'Most recent observed status of the Alertmanager cluster. Read-only. - Not included when requesting from the apiserver, only from the Prometheus - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this Alertmanager cluster. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlaying managed - objects are being performed. Only delete actions will be performed. - type: boolean - replicas: - description: Total number of non-terminated pods targeted by this Alertmanager - cluster (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this Alertmanager - cluster. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this Alertmanager - cluster that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. 
+ type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. 
May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. 
+ type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. Read-only. + Not included when requesting from the apiserver, only from the Prometheus + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Alertmanager cluster. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlaying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Alertmanager + cluster (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager + cluster. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Alertmanager + cluster that have the desired version spec. 
+ format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + additionalPrinterColumns: + - jsonPath: .spec.version + description: The version of Alertmanager + name: Version + type: string + - jsonPath: .spec.replicas + description: The desired replicas number of Alertmanagers + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date served: true storage: true + subresources: {} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml index ab2af4e7cb..71cb4b2130 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: podmonitors.monitoring.coreos.com spec: @@ -16,246 +15,246 @@ spec: singular: podmonitor preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: PodMonitor defines monitoring for a set of pods. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired Pod selection for target discovery - by Prometheus. - properties: - jobLabel: - description: The label to use to retrieve the job name from. - type: string - namespaceSelector: - description: Selector to select which namespaces the Endpoints objects - are discovered from. - properties: - any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. - type: boolean - matchNames: - description: List of namespace names. - items: - type: string - type: array - type: object - podMetricsEndpoints: - description: A list of endpoints allowed as part of this PodMonitor. - items: - description: PodMetricsEndpoint defines a scrapeable endpoint of a - Kubernetes Pod serving Prometheus metrics. + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PodMonitor defines monitoring for a set of pods. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery + by Prometheus. + properties: + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects + are discovered from. properties: - honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. type: boolean - honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. - type: boolean - interval: - description: Interval at which metrics should be scraped - type: string - metricRelabelings: - description: MetricRelabelConfigs to apply to samples before ingestion. + matchNames: + description: List of namespace names. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. 
Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: + type: string + type: array + type: object + podMetricsEndpoints: + description: A list of endpoints allowed as part of this PodMonitor. + items: + description: PodMetricsEndpoint defines a scrapeable endpoint of a + Kubernetes Pod serving Prometheus metrics. + properties: + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + params: + additionalProperties: + items: type: string + type: array + description: Optional HTTP URL parameters type: object - type: array - params: - additionalProperties: + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the pod port this endpoint refers to. Mutually + exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before ingestion. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - type: string + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. 
+ Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object type: array - description: Optional HTTP URL parameters - type: object - path: - description: HTTP path to scrape for metrics. - type: string - port: - description: Name of the pod port this endpoint refers to. Mutually - exclusive with targetPort. - type: string - proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. - type: string - relabelings: - description: 'RelabelConfigs to apply to samples before ingestion. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Deprecated: Use ''port'' instead.' 
+ x-kubernetes-int-or-string: true + type: object + type: array + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod + onto the target. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Pod objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + key: + description: key is the label key that the selector applies + to. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string - sourceLabels: - description: The source labels select values from existing - labels. 
Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string + required: + - key + - operator type: object type: array - scheme: - description: HTTP scheme to use for scraping. - type: string - scrapeTimeout: - description: Timeout after which the scrape is ended - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: 'Deprecated: Use ''port'' instead.' - x-kubernetes-int-or-string: true - type: object - type: array - podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. - items: - type: string - type: array - sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. - format: int64 - type: integer - selector: - description: Selector to select Pod objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. 
- type: object - type: object - required: - - podMetricsEndpoints - - selector - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + type: object + required: + - podMetricsEndpoints + - selector + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml index 3699396f1c..669325a996 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml @@ -1,25 +1,12 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: prometheuses.monitoring.coreos.com spec: - additionalPrinterColumns: - - JSONPath: .spec.version - description: The version of Prometheus - name: Version - type: string - - JSONPath: .spec.replicas - description: The desired replicas number of Prometheuses - name: Replicas - type: integer - - JSONPath: .metadata.creationTimestamp - name: Age - type: date group: monitoring.coreos.com names: kind: Prometheus @@ -28,2278 +15,3469 @@ spec: singular: prometheus preserveUnknownFields: false scope: Namespaced - subresources: {} - validation: - openAPIV3Schema: - description: Prometheus defines a Prometheus deployment. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the Prometheus cluster. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - additionalAlertManagerConfigs: - description: 'AdditionalAlertManagerConfigs allows specifying a key - of a Secret containing additional Prometheus AlertManager configurations. - AlertManager configurations specified are appended to the configurations - generated by the Prometheus Operator. Job configurations specified - must have the form as specified in the official Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. - As AlertManager configs are appended, the user is responsible to make - sure it is valid. Note that using this feature may expose the possibility - to break upgrades of Prometheus. It is advised to review Prometheus - release notes to ensure that no incompatible AlertManager configs - are going to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - additionalAlertRelabelConfigs: - description: 'AdditionalAlertRelabelConfigs allows specifying a key - of a Secret containing additional Prometheus alert relabel configurations. - Alert relabel configurations specified are appended to the configurations - generated by the Prometheus Operator. Alert relabel configurations - specified must have the form as specified in the official Prometheus - documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. - As alert relabel configs are appended, the user is responsible to - make sure it is valid. Note that using this feature may expose the - possibility to break upgrades of Prometheus. It is advised to review - Prometheus release notes to ensure that no incompatible alert relabel - configs are going to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - additionalScrapeConfigs: - description: 'AdditionalScrapeConfigs allows specifying a key of a Secret - containing additional Prometheus scrape configurations. Scrape configurations - specified are appended to the configurations generated by the Prometheus - Operator. Job configurations specified must have the form as specified - in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. 
- As scrape configs are appended, the user is responsible to make sure - it is valid. Note that using this feature may expose the possibility - to break upgrades of Prometheus. It is advised to review Prometheus - release notes to ensure that no incompatible scrape configs are going - to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Prometheus defines a Prometheus deployment. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus cluster. + More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalAlertManagerConfigs: + description: 'AdditionalAlertManagerConfigs allows specifying a key + of a Secret containing additional Prometheus AlertManager configurations. + AlertManager configurations specified are appended to the configurations + generated by the Prometheus Operator. Job configurations specified + must have the form as specified in the official Prometheus documentation: + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. + As AlertManager configs are appended, the user is responsible to make + sure it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible AlertManager configs + are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalAlertRelabelConfigs: + description: 'AdditionalAlertRelabelConfigs allows specifying a key + of a Secret containing additional Prometheus alert relabel configurations. + Alert relabel configurations specified are appended to the configurations + generated by the Prometheus Operator. Alert relabel configurations + specified must have the form as specified in the official Prometheus + documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + As alert relabel configs are appended, the user is responsible to + make sure it is valid. Note that using this feature may expose the + possibility to break upgrades of Prometheus. It is advised to review + Prometheus release notes to ensure that no incompatible alert relabel + configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a Secret + containing additional Prometheus scrape configurations. Scrape configurations + specified are appended to the configurations generated by the Prometheus + Operator. 
Job configurations specified must have the form as specified + in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + As scrape configs are appended, the user is responsible to make sure + it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible scrape configs are going + to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). 
A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. 
This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. 
The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. 
due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. 
+ namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alerting: + description: Define details regarding alerting. + properties: + alertmanagers: + description: AlertmanagerEndpoints Prometheus should fire alerts + against. + items: + description: AlertmanagerEndpoints defines a selection of a single + Endpoints object containing alertmanager IPs to fire alerts + against. + properties: + apiVersion: + description: Version of the Alertmanager API that Prometheus + uses to send alerts. It can be "v1" or "v2". + type: string + bearerTokenFile: + description: BearerTokenFile to read from filesystem to use + when authenticating to Alertmanager. + type: string + name: + description: Name of Endpoints object in Namespace. + type: string + namespace: + description: Namespace of Endpoints object. + type: string + pathPrefix: + description: Prefix for the HTTP path alerts are pushed to. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port the Alertmanager API is exposed on. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use when firing alerts. + type: string + tlsConfig: + description: TLS Config to use for alertmanager connection. + properties: + ca: + description: Stuct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. properties: key: - description: The label key that the selector - applies to. + description: The key to select. type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: An array of string values. 
If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean required: - key - - operator type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + secret: + description: Secret containing data to use for the + targets. properties: key: - description: The label key that the selector - applies to. + description: The key of the secret to select from. Must + be a valid secret key. 
type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean required: - key - - operator type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. properties: key: - description: The label key that the selector - applies to. + description: The key to select. type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean required: - key - - operator type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. 
- properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + secret: + description: Secret containing data to use for the + targets. properties: key: - description: key is the label key that the selector - applies to. + description: The key of the secret to select from. Must + be a valid secret key. type: string - operator: - description: operator represents a key's relationship - to a set of values. 
Valid operators are In, - NotIn, Exists and DoesNotExist. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean required: - key - - operator type: object - type: array - matchLabels: - additionalProperties: + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for + the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + type: object + required: + - name + - namespace + - port + type: object + type: array + required: + - alertmanagers + type: object + apiserverConfig: + description: APIServerConfig allows specifying a host and auth methods + to access apiserver. If left empty, Prometheus is assumed to run inside + of the cluster and will discover API servers automatically and use + the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + properties: + basicAuth: + description: BasicAuth allow an endpoint to authenticate over basic + authentication + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean required: - - topologyKey + - key type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. 
as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for accessing apiserver. + type: string + bearerTokenFile: + description: File to read bearer token for accessing apiserver. + type: string + host: + description: Host of apiserver. A valid string consisting of a hostname + or IP followed by an optional port number + type: string + tlsConfig: + description: TLS Config to use for accessing apiserver. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. 
properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + configMap: + description: ConfigMap containing data to use for the targets. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean required: - - topologyKey + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. 
If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. + configMap: + description: ConfigMap containing data to use for the targets. properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey type: object - type: array - type: object - type: object - alerting: - description: Define details regarding alerting. - properties: - alertmanagers: - description: AlertmanagerEndpoints Prometheus should fire alerts - against. - items: - description: AlertmanagerEndpoints defines a selection of a single - Endpoints object containing alertmanager IPs to fire alerts - against. - properties: - apiVersion: - description: Version of the Alertmanager API that Prometheus - uses to send alerts. It can be "v1" or "v2". - type: string - bearerTokenFile: - description: BearerTokenFile to read from filesystem to use - when authenticating to Alertmanager. + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - name: - description: Name of Endpoints object in Namespace. + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container + for the targets. type: string - namespace: - description: Namespace of Endpoints object. + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - pathPrefix: - description: Prefix for the HTTP path alerts are pushed to. 
+ type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: ArbitraryFSAccessThroughSMs configures whether configuration + based on a service monitor can access arbitrary files on the file + system of the Prometheus container e.g. bearer token files. + properties: + deny: + type: boolean + type: object + baseImage: + description: Base image to use for a Prometheus deployment. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Prometheus object, which shall be mounted into the Prometheus + Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying + operator generated containers. This can be used to allow adding an + authentication proxy to a Prometheus pod or to change the behavior + of an operator generated container. Containers described here modify + an operator generated container if they share the same name and modifications + are done via a strategic merge patch. The current container names + are: `prometheus`, `prometheus-config-reloader`, `rules-configmap-reloader`, + and `thanos-sidecar`. Overriding containers is entirely outside the + scope of what the maintainers will support and by doing so, you accept + that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. 
Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: type: string - port: - anyOf: - - type: integer - - type: string - description: Port the Alertmanager API is exposed on. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use when firing alerts. + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: type: string - tlsConfig: - description: TLS Config to use for alertmanager connection. + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. properties: - ca: - description: Stuct containing the CA cert to use for the - targets. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' 
+ type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - configMap: - description: ConfigMap containing data to use for - the targets. + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: key: description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or - its key must be defined + description: Specify whether the ConfigMap or its + key must be defined type: boolean required: - key type: object - secret: - description: Secret containing data to use for the - targets. + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + fieldPath: + description: Path of the field to select in the + specified API version. type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean required: - - key + - fieldPath type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. 
- type: string - cert: - description: Struct containing the client cert file for - the targets. - properties: - configMap: - description: ConfigMap containing data to use for - the targets. + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' properties: - key: - description: The key to select. + containerName: + description: 'Container name: required for volumes, + optional for env vars' type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean required: - - key + - resource type: object - secret: - description: Secret containing data to use for the - targets. + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its - key must be defined + description: Specify whether the Secret or its key + must be defined type: boolean required: - key type: object type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for - the targets. + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the ConfigMap must be defined type: boolean - required: - - key type: object - serverName: - description: Used to verify the hostname for the targets. 
+ prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object - required: - - name - - namespace - - port - type: object - type: array - required: - - alertmanagers - type: object - apiserverConfig: - description: APIServerConfig allows specifying a host and auth methods - to access apiserver. If left empty, Prometheus is assumed to run inside - of the cluster and will discover API servers automatically and use - the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. - properties: - basicAuth: - description: BasicAuth allow an endpoint to authenticate over basic - authentication - properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + type: array + image: + description: 'Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. 
Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. 
The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object type: object - type: object - bearerToken: - description: Bearer token for accessing apiserver. - type: string - bearerTokenFile: - description: File to read bearer token for accessing apiserver. - type: string - host: - description: Host of apiserver. A valid string consisting of a hostname - or IP followed by an optional port number - type: string - tlsConfig: - description: TLS Config to use for accessing apiserver. - properties: - ca: - description: Stuct containing the CA cert to use for the targets. + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMap: - description: ConfigMap containing data to use for the targets. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - key: - description: The key to select. 
+ command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean required: - - key + - port type: object - secret: - description: Secret containing data to use for the targets. + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. type: string - cert: - description: Struct containing the client cert file for the - targets. + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. 
Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMap: - description: ConfigMap containing data to use for the targets. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - key: - description: The key to select. + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean required: - - key + - port type: object - secret: - description: Secret containing data to use for the targets. + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. 
Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. - type: string - keySecret: - description: Secret containing the client key file for the targets. + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - required: - - host - type: object - arbitraryFSAccessThroughSMs: - description: ArbitraryFSAccessThroughSMs configures whether configuration - based on a service monitor can access arbitrary files on the file - system of the Prometheus container e.g. bearer token files. - properties: - deny: - type: boolean - type: object - baseImage: - description: Base image to use for a Prometheus deployment. - type: string - configMaps: - description: ConfigMaps is a list of ConfigMaps in the same namespace - as the Prometheus object, which shall be mounted into the Prometheus - Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. - items: - type: string - type: array - containers: - description: 'Containers allows injecting additional containers or modifying - operator generated containers. This can be used to allow adding an - authentication proxy to a Prometheus pod or to change the behavior - of an operator generated container. Containers described here modify - an operator generated container if they share the same name and modifications - are done via a strategic merge patch. The current container names - are: `prometheus`, `prometheus-config-reloader`, `rules-configmap-reloader`, - and `thanos-sidecar`. Overriding containers is entirely outside the - scope of what the maintainers will support and by doing so, you accept - that this behaviour may break at any time without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. 
If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. 
The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. 
When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. 
+ format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. 
If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. 
+ The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. 
Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + disableCompaction: + description: Disable prometheus compaction. + type: boolean + enableAdminAPI: + description: 'Enable access to prometheus web admin API. Defaults to + the value of `false`. WARNING: Enabling the admin APIs enables mutating + endpoints, to delete data, shutdown Prometheus, and more. 
Enabling + this should be done with care and the user is advised to add additional + authentication authorization via a proxy to ensure only clients authorized + to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' + type: boolean + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label + of origin for each alert and metric that is user created. The label + value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating + with external systems (federation, remote storage, Alertmanager). + type: object + externalUrl: + description: The external URL the Prometheus instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Prometheus is not served from root of a DNS name. + type: string + ignoreNamespaceSelectors: + description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector + settings from the podmonitor and servicemonitor configs, and they + will only discover endpoints within their current namespace. Defaults + to false. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag and + sha combinations. Specifying the version is still necessary to ensure + the Prometheus Operator knows what version of Prometheus is being + configured. 
+ type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling prometheus and alertmanager images from registries + see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Prometheus configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. 
- The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object type: object - httpGet: - description: HTTPGet specifies the http request to perform. + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - port + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must be defined + type: boolean type: object type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' 
+ type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. properties: - name: - description: The header field name + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - value: - description: The header field value + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - name - - value + - port type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. 
TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + - containerPort type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. 
- format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - user: - description: User is a SELinux user label that applies - to the container. + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. 
Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. 
If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. 
+ name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. 
properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: This must match the Name of a Volume. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. 
- format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - mountPath + - name type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Prometheus server listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for Prometheus to be configured with. 
+ type: string + logLevel: + description: Log level for Prometheus to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + overrideHonorLabels: + description: OverrideHonorLabels if set to true overrides all user configured + honor_labels. If HonorLabels is set in ServiceMonitor or PodMonitor + to true, this overrides honor_labels to false. + type: boolean + overrideHonorTimestamps: + description: OverrideHonorTimestamps allows to globally enforce honoring + timestamps in all scrape configs. + type: boolean + paused: + description: When a Prometheus deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the prometheus pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. 
If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. 
More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + podMonitorNamespaceSelector: + description: Namespaces to be selected for PodMonitor discovery. If + nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: volumeDevice describes a mapping of a raw block - device within a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. + key: + description: key is the label key that the selector applies + to. type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - devicePath - - name + - key + - operator type: object type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. 
+ type: object + type: object + podMonitorSelector: + description: '*Experimental* PodMonitors to be selected for target discovery.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: VolumeMount describes a mounting of a Volume within - a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + key: + description: key is the label key that the selector applies + to. type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. 
If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - mountPath - - name + - key + - operator type: object type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object type: object - type: array - disableCompaction: - description: Disable prometheus compaction. - type: boolean - enableAdminAPI: - description: 'Enable access to prometheus web admin API. Defaults to - the value of `false`. WARNING: Enabling the admin APIs enables mutating - endpoints, to delete data, shutdown Prometheus, and more. Enabling - this should be done with care and the user is advised to add additional - authentication authorization via a proxy to ensure only clients authorized - to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' - type: boolean - enforcedNamespaceLabel: - description: EnforcedNamespaceLabel enforces adding a namespace label - of origin for each alert and metric that is user created. The label - value will always be the namespace of the object that is being created. - type: string - evaluationInterval: - description: Interval between consecutive evaluations. - type: string - externalLabels: - additionalProperties: + portName: + description: Port name used for the pods and governing service. 
This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods type: string - description: The labels to add to any time series or alerts when communicating - with external systems (federation, remote storage, Alertmanager). - type: object - externalUrl: - description: The external URL the Prometheus instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Prometheus is not served from root of a DNS name. - type: string - ignoreNamespaceSelectors: - description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector - settings from the podmonitor and servicemonitor configs, and they - will only discover endpoints within their current namespace. Defaults - to false. - type: boolean - image: - description: Image if specified has precedence over baseImage, tag and - sha combinations. Specifying the version is still necessary to ensure - the Prometheus Operator knows what version of Prometheus is being - configured. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling prometheus and alertmanager images from registries - see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. + prometheusExternalLabelName: + description: Name of Prometheus external label used to denote Prometheus + instance name. Defaults to the value of `prometheus`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + query: + description: QuerySpec defines the query command line flags when starting + Prometheus. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' + lookbackDelta: + description: The delta difference allowed for retrieving metrics + during expression evaluations. + type: string + maxConcurrency: + description: Number of concurrent queries that can be run at once. + format: int32 + type: integer + maxSamples: + description: Maximum number of samples a single query can load into + memory. Note that queries will fail if they would load more samples + than this into memory, so this also limits the number of samples + a query can return. + format: int32 + type: integer + timeout: + description: Maximum time a query may take before being aborted. type: string type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the Prometheus configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + remoteRead: + description: If specified, the remote_read spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteReadSpec defines the remote_read configuration + for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' 
- type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + password: + description: The secret in the service monitor namespace that + contains the password for authentication. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: bearer token for remote read. + type: string + bearerTokenFile: + description: File to read bearer token for remote read. + type: string + proxyUrl: + description: Optional ProxyURL + type: string + readRecent: + description: Whether reads should be made for queries for time + ranges that the local storage should have complete data for. + type: boolean + remoteTimeout: + description: Timeout for requests to the remote read endpoint. + type: string + requiredMatchers: + additionalProperties: + type: string + description: An optional list of equality matchers which have + to be present in a selector to query the remote read endpoint. + type: object + tlsConfig: + description: TLS Config to use for remote read. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + secret: + description: Secret containing data to use for the targets. properties: key: description: The key of the secret to select from. Must @@ -2318,1172 +3496,1006 @@ spec: - key type: object type: object - required: - - name + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. 
+ type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + url: + description: The URL of the endpoint to send samples to. + type: string + required: + - url + type: object + type: array + remoteWrite: + description: If specified, the remote_write spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteWriteSpec defines the remote_write configuration + for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. 
properties: - configMapRef: - description: The ConfigMap to select from + password: + description: The secret in the service monitor namespace that + contains the password for authentication. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap must be defined + description: Specify whether the Secret or its key must + be defined type: boolean + required: + - key type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + username: + description: The secret in the service monitor namespace that + contains the username for authentication. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret must be defined + description: Specify whether the Secret or its key must + be defined type: boolean + required: + - key type: object type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. 
- type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. 
- properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. 
Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. 
- type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). 
Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + bearerToken: + description: File to read bearer token for remote write. + type: string + bearerTokenFile: + description: File to read bearer token for remote write. + type: string + proxyUrl: + description: Optional ProxyURL + type: string + queueConfig: + description: QueueConfig allows tuning of the remote write queue + parameters. properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external port to. + batchSendDeadline: + description: BatchSendDeadline is the maximum time a sample + will wait in buffer. type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. - format: int32 + capacity: + description: Capacity is the number of samples to buffer per + shard before we start dropping them. type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. + maxBackoff: + description: MaxBackoff is the maximum retry delay. type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. 
- Defaults to "TCP". + maxRetries: + description: MaxRetries is the maximum number of times to + retry a batch on recoverable errors. + type: integer + maxSamplesPerSend: + description: MaxSamplesPerSend is the maximum number of samples + per send. + type: integer + maxShards: + description: MaxShards is the maximum number of shards, i.e. + amount of concurrency. + type: integer + minBackoff: + description: MinBackoff is the initial retry delay. Gets doubled + for every retry. type: string - required: - - containerPort + minShards: + description: MinShards is the minimum number of shards, i.e. + amount of concurrency. + type: integer type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes + remoteTimeout: + description: Timeout for requests to the remote write endpoint. + type: string + tlsConfig: + description: TLS Config to use for remote write. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. properties: + key: + description: The key to select. + type: string name: - description: The header field name + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - value: - description: The header field value + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean required: - - name - - value + - key type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. 
- type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. type: string - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. 
More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. 
The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + writeRelabelConfigs: + description: The list of remote write relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. + action: + description: Action to perform based on regex matching. + Default is 'replace' type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. 
This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + separator: + description: Separator placed between concatenated source + label values. default is ';'. type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. items: type: string type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + required: + - url + type: object + type: array + replicaExternalLabelName: + description: Name of Prometheus external label used to denote replica + name. Defaults to the value of `prometheus_replica`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + replicas: + description: Number of instances to deploy for a Prometheus deployment. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute resources + allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. 
- type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Prometheus shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + retentionSize: + description: Maximum amount of disk space used by blocks. + type: string + routePrefix: + description: The route prefix Prometheus registers HTTP handlers for. + This is useful, if using ExternalURL and a proxy is rewriting HTTP + routes of a request, and the actual ExternalURL is still true, but + the server serves requests under a different route prefix. For example + for use with `kubectl proxy`. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. + If unspecified, only the same namespace as the Prometheus object is + in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: volumeDevice describes a mapping of a raw block - device within a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. 
properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. + key: + description: key is the label key that the selector applies + to. type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - devicePath - - name + - key + - operator type: object type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A selector to select which PrometheusRules to mount for + loading alerting rules from. Until (excluding) Prometheus Operator + v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps + to PrometheusRule custom resources selected by RuleSelector. Make + sure it does not match any config maps that you do not want to be + migrated. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: VolumeMount describes a mounting of a Volume within - a container. 
+ description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + key: + description: key is the label key that the selector applies + to. type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - mountPath - - name + - key + - operator type: object type: array - workingDir: - description: Container's working directory. 
If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object type: object - type: array - listenLocal: - description: ListenLocal makes the Prometheus server listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - logFormat: - description: Log format for Prometheus to be configured with. - type: string - logLevel: - description: Log level for Prometheus to be configured with. - type: string - nodeSelector: - additionalProperties: - type: string - description: Define which Nodes the Pods are scheduled on. - type: object - overrideHonorLabels: - description: OverrideHonorLabels if set to true overrides all user configured - honor_labels. If HonorLabels is set in ServiceMonitor or PodMonitor - to true, this overrides honor_labels to false. - type: boolean - overrideHonorTimestamps: - description: OverrideHonorTimestamps allows to globally enforce honoring - timestamps in all scrape configs. - type: boolean - paused: - description: When a Prometheus deployment is paused, no actions except - for deletion will be performed on the underlying objects. - type: boolean - podMetadata: - description: PodMetadata configures Labels and Annotations which are - propagated to the prometheus pods. - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - podMonitorNamespaceSelector: - description: Namespaces to be selected for PodMonitor discovery. If - nil, only check own namespace. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. + rules: + description: /--rules.*/ command-line arguments. + properties: + alert: + description: /--rules.alert.*/ command-line arguments properties: - key: - description: key is the label key that the selector applies - to. + forGracePeriod: + description: Minimum duration between alert and restored 'for' + state. This is maintained only for alerts with configured + 'for' time greater than grace period. type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. + forOutageTolerance: + description: Max time to tolerate prometheus outage for restoring + 'for' state of alert. + type: string + resendDelay: + description: Minimum amount of time to wait before resending + an alert to Alertmanager. type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - podMonitorSelector: - description: '*Experimental* PodMonitors to be selected for target discovery.' - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. + type: object + scrapeInterval: + description: Interval between consecutive scrapes. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the + Prometheus object, which shall be mounted into the Prometheus Pods. + The Secrets are mounted into /etc/prometheus/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." + format: int64 + type: integer + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. 
May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. properties: - key: - description: key is the label key that the selector applies - to. + level: + description: Level is SELinux level label that applies to the + container. type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. + role: + description: Role is a SELinux role label that applies to the + container. type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + type: + description: Type is a SELinux type label that applies to the + container. + type: string + user: + description: User is a SELinux user label that applies to the + container. + type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set type: string - type: array - required: - - key - - operator + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. 
+ type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. + type: string type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - portName: - description: Port name used for the pods and governing service. This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - prometheusExternalLabelName: - description: Name of Prometheus external label used to denote Prometheus - instance name. Defaults to the value of `prometheus`. External label - will _not_ be added when value is set to empty string (`""`). - type: string - query: - description: QuerySpec defines the query command line flags when starting - Prometheus. - properties: - lookbackDelta: - description: The delta difference allowed for retrieving metrics - during expression evaluations. - type: string - maxConcurrency: - description: Number of concurrent queries that can be run at once. - format: int32 - type: integer - maxSamples: - description: Maximum number of samples a single query can load into - memory. Note that queries will fail if they would load more samples - than this into memory, so this also limits the number of samples - a query can return. 
- format: int32 - type: integer - timeout: - description: Maximum time a query may take before being aborted. - type: string - type: object - remoteRead: - description: If specified, the remote_read spec. This is an experimental - feature, it may change in any upcoming release in a breaking way. - items: - description: RemoteReadSpec defines the remote_read configuration - for prometheus. + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Prometheus Pods. + type: string + serviceMonitorNamespaceSelector: + description: Namespaces to be selected for ServiceMonitor discovery. + If nil, only check own namespace. properties: - basicAuth: - description: BasicAuth for the URL. + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. 
+ type: object + type: object + serviceMonitorSelector: + description: ServiceMonitors to be selected for target discovery. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + sha: + description: SHA of Prometheus container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + type: string + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object type: object - bearerToken: - description: bearer token for remote read. - type: string - bearerTokenFile: - description: File to read bearer token for remote read. - type: string - proxyUrl: - description: Optional ProxyURL - type: string - readRecent: - description: Whether reads should be made for queries for time - ranges that the local storage should have complete data for. 
- type: boolean - remoteTimeout: - description: Timeout for requests to the remote read endpoint. + type: object + tag: + description: Tag of Prometheus container image to be deployed. Defaults + to the value of `version`. Version is ignored if Tag is set. + type: string + thanos: + description: "Thanos configuration allows configuring various aspects + of a Prometheus server in a Thanos environment. \n This section is + experimental, it may change significantly without deprecation notice + in any release. \n This is experimental and may change significantly + without backward compatibility in any release." + properties: + baseImage: + description: Thanos base image if other than default. type: string - requiredMatchers: - additionalProperties: - type: string - description: An optional list of equality matchers which have - to be present in a selector to query the remote read endpoint. - type: object - tlsConfig: - description: TLS Config to use for remote read. + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from + which Thanos Querier reads recorded rule data. Note: Currently + only the CAFile, CertFile, and KeyFile fields are supported. Maps + to the ''--grpc-server-tls-*'' CLI args.' properties: ca: description: Stuct containing the CA cert to use for the targets. properties: configMap: - description: ConfigMap containing data to use for the - targets. + description: ConfigMap containing data to use for the targets. properties: key: description: The key to select. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the ConfigMap or its - key must be defined + description: Specify whether the ConfigMap or its key + must be defined type: boolean required: - key @@ -3497,12 +4509,11 @@ spec: type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the Secret or its key must + be defined type: boolean required: - key @@ -3517,20 +4528,18 @@ spec: targets. properties: configMap: - description: ConfigMap containing data to use for the - targets. + description: ConfigMap containing data to use for the targets. properties: key: description: The key to select. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined + description: Specify whether the ConfigMap or its key + must be defined type: boolean required: - key @@ -3544,12 +4553,11 @@ spec: type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the Secret or its key must + be defined type: boolean required: - key @@ -3563,12 +4571,11 @@ spec: description: Disable target certificate validation. 
type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. + description: Path to the client key file in the Prometheus container + for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. + description: Secret containing the client key file for the targets. properties: key: description: The key of the secret to select from. Must @@ -3589,2415 +4596,1406 @@ spec: description: Used to verify the hostname for the targets. type: string type: object - url: - description: The URL of the endpoint to send samples to. + image: + description: Image if specified has precedence over baseImage, tag + and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Thanos + is being configured. type: string - required: - - url - type: object - type: array - remoteWrite: - description: If specified, the remote_write spec. This is an experimental - feature, it may change in any upcoming release in a breaking way. - items: - description: RemoteWriteSpec defines the remote_write configuration - for prometheus. - properties: - basicAuth: - description: BasicAuth for the URL. + listenLocal: + description: ListenLocal makes the Thanos sidecar listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + resources: + description: Resources defines the resource requirements for the + Thanos sidecar. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object - bearerToken: - description: File to read bearer token for remote write. + sha: + description: SHA of Thanos container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. type: string - bearerTokenFile: - description: File to read bearer token for remote write. - type: string - proxyUrl: - description: Optional ProxyURL + tag: + description: Tag of Thanos sidecar container image to be deployed. + Defaults to the value of `version`. Version is ignored if Tag + is set. type: string - queueConfig: - description: QueueConfig allows tuning of the remote write queue - parameters. + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is + an experimental feature, it may change in any upcoming release + in a breaking way. properties: - batchSendDeadline: - description: BatchSendDeadline is the maximum time a sample - will wait in buffer. - type: string - capacity: - description: Capacity is the number of samples to buffer per - shard before we start dropping them. - type: integer - maxBackoff: - description: MaxBackoff is the maximum retry delay. + key: + description: The key of the secret to select from. Must be + a valid secret key. type: string - maxRetries: - description: MaxRetries is the maximum number of times to - retry a batch on recoverable errors. - type: integer - maxSamplesPerSend: - description: MaxSamplesPerSend is the maximum number of samples - per send. - type: integer - maxShards: - description: MaxShards is the maximum number of shards, i.e. - amount of concurrency. - type: integer - minBackoff: - description: MinBackoff is the initial retry delay. Gets doubled - for every retry. + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - minShards: - description: MinShards is the minimum number of shards, i.e. - amount of concurrency. - type: integer + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key type: object - remoteTimeout: - description: Timeout for requests to the remote write endpoint. + version: + description: Version describes the version of Thanos to use. type: string - tlsConfig: - description: TLS Config to use for remote write. - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. - type: string - cert: - description: Struct containing the client cert file for the - targets. 
- properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for the - targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - url: - description: The URL of the endpoint to send samples to. - type: string - writeRelabelConfigs: - description: The list of remote write relabel configurations. - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. 
- type: string - type: object - type: array - required: - - url type: object - type: array - replicaExternalLabelName: - description: Name of Prometheus external label used to denote replica - name. Defaults to the value of `prometheus_replica`. External label - will _not_ be added when value is set to empty string (`""`). - type: string - replicas: - description: Number of instances to deploy for a Prometheus deployment. - format: int32 - type: integer - resources: - description: Define resources requests and limits for single Pods. - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration Prometheus shall retain data for. Default - is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` - (milliseconds seconds minutes hours days weeks years). - type: string - retentionSize: - description: Maximum amount of disk space used by blocks. - type: string - routePrefix: - description: The route prefix Prometheus registers HTTP handlers for. - This is useful, if using ExternalURL and a proxy is rewriting HTTP - routes of a request, and the actual ExternalURL is still true, but - the server serves requests under a different route prefix. For example - for use with `kubectl proxy`. - type: string - ruleNamespaceSelector: - description: Namespaces to be selected for PrometheusRules discovery. 
- If unspecified, only the same namespace as the Prometheus object is - in is used. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - ruleSelector: - description: A selector to select which PrometheusRules to mount for - loading alerting rules from. Until (excluding) Prometheus Operator - v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps - to PrometheusRule custom resources selected by RuleSelector. Make - sure it does not match any config maps that you do not want to be - migrated. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - rules: - description: /--rules.*/ command-line arguments. - properties: - alert: - description: /--rules.alert.*/ command-line arguments + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . properties: - forGracePeriod: - description: Minimum duration between alert and restored 'for' - state. This is maintained only for alerts with configured - 'for' time greater than grace period. + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. 
type: string - forOutageTolerance: - description: Max time to tolerate prometheus outage for restoring - 'for' state of alert. + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. type: string - resendDelay: - description: Minimum amount of time to wait before resending - an alert to Alertmanager. + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. type: string type: object - type: object - scrapeInterval: - description: Interval between consecutive scrapes. - type: string - secrets: - description: Secrets is a list of Secrets in the same namespace as the - Prometheus object, which shall be mounted into the Prometheus Pods. - The Secrets are mounted into /etc/prometheus/secrets/. - items: + type: array + version: + description: Version of Prometheus to be deployed. type: string - type: array - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. 
- properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. 
If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the prometheus container, that + are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a + container. properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. type: string - user: - description: User is a SELinux user label that applies to the - container. + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When not + set, MountPropagationNone is used. This field is beta in 1.10. type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. 
- items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + name: + description: This must match the Name of a Volume. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. + readOnly: + description: Mounted read-only if true, read-write otherwise (false + or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. 
+ subPathExpr: + description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. type: string + required: + - mountPath + - name type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Prometheus Pods. - type: string - serviceMonitorNamespaceSelector: - description: Namespaces to be selected for ServiceMonitor discovery. - If nil, only check own namespace. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + type: array + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - serviceMonitorSelector: - description: ServiceMonitors to be selected for target discovery. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - sha: - description: SHA of Prometheus container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - storage: - description: Storage spec to specify how storage shall be used. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string name: - description: Name is the name of resource being referenced + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
- If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string + required: + - monitors type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. items: - description: PersistentVolumeClaimCondition contails details - about state of pvc + description: Maps a string key to a path within a volume. properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: + key: + description: The key to project. type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. 
May not start with the string + '..'. type: string required: - - status - - type + - key + - path type: object type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean type: object - type: object - type: object - tag: - description: Tag of Prometheus container image to be deployed. Defaults - to the value of `version`. Version is ignored if Tag is set. - type: string - thanos: - description: "Thanos configuration allows configuring various aspects - of a Prometheus server in a Thanos environment. \n This section is - experimental, it may change significantly without deprecation notice - in any release. \n This is experimental and may change significantly - without backward compatibility in any release." - properties: - baseImage: - description: Thanos base image if other than default. - type: string - grpcServerTlsConfig: - description: 'GRPCServerTLSConfig configures the gRPC server from - which Thanos Querier reads recorded rule data. Note: Currently - only the CAFile, CertFile, and KeyFile fields are supported. Maps - to the ''--grpc-server-tls-*'' CLI args.' - properties: - ca: - description: Stuct containing the CA cert to use for the targets. + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). properties: - configMap: - description: ConfigMap containing data to use for the targets. + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. 
"ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - key: - description: The key to select. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. type: object + required: + - driver type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. 
- type: string - cert: - description: Struct containing the client cert file for the - targets. + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. properties: - configMap: - description: ConfigMap containing data to use for the targets. + driver: + description: Driver is the name of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' properties: - key: - description: The key to select. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key type: object - secret: - description: Secret containing data to use for the targets. + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. 
If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - keySecret: - description: Secret containing the client key file for the targets. + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string - optional: - description: Specify whether the Secret or its key must - be defined + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. 
type: boolean required: - - key + - claimName type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - image: - description: Image if specified has precedence over baseImage, tag - and sha combinations. Specifying the version is still necessary - to ensure the Prometheus Operator knows what version of Thanos - is being configured. - type: string - listenLocal: - description: ListenLocal makes the Thanos sidecar listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - objectStorageConfig: - description: ObjectStorageConfig configures object storage in Thanos. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - resources: - description: Resources defines the resource requirements for the - Thanos sidecar. If not provided, no requests/limits will be set - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID type: object - type: object - sha: - description: SHA of Thanos container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - tag: - description: Tag of Thanos sidecar container image to be deployed. - Defaults to the value of `version`. Version is ignored if Tag - is set. - type: string - tracingConfig: - description: TracingConfig configures tracing in Thanos. This is - an experimental feature, it may change in any upcoming release - in a breaking way. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - version: - description: Version describes the version of Thanos to use. - type: string - type: object - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - version: - description: Version of Prometheus to be deployed. 
- type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts - on the output StatefulSet definition. VolumeMounts specified will - be appended to other VolumeMounts in the prometheus container, that - are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a - container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When not - set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false - or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's - volume should be mounted. Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded using the container's - environment. Defaults to "" (volume's root). SubPathExpr and - SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. 
- properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' - type: string - diskName: - description: The Name of the data disk in the blob storage - type: string - diskURI: - description: The URI the data disk in the blob storage - type: string - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key - type: string - shareName: - description: Share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. + As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. 
More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
+ readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. 
- type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. 
Must be utf-8 encoded. The - first item of the relative path must not start with - ''..''' + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. 
- More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' - items: + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. 
- type: string - options: - additionalProperties: + system: + description: The name of the storage system as configured + in ScaleIO. type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. 
More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. 
- type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - sources: - description: list of volume projections - items: - description: Projection that may be projected along with - other supported volume types - properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. 
- type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. 
- items: - description: Maps a string key to a path within a volume. + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. 
- properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. - type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. 
- type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. - type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - walCompression: - description: Enable compression of the write-ahead log using Snappy. - This flag is only available in versions of Prometheus >= 2.11.0. - type: boolean - type: object - status: - description: 'Most recent observed status of the Prometheus cluster. Read-only. - Not included when requesting from the apiserver, only from the Prometheus - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this Prometheus deployment. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlaying managed - objects are being performed. Only delete actions will be performed. - type: boolean - replicas: - description: Total number of non-terminated pods targeted by this Prometheus - deployment (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this Prometheus - deployment. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this Prometheus - deployment that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. 
Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + walCompression: + description: Enable compression of the write-ahead log using Snappy. + This flag is only available in versions of Prometheus >= 2.11.0. + type: boolean + type: object + status: + description: 'Most recent observed status of the Prometheus cluster. Read-only. + Not included when requesting from the apiserver, only from the Prometheus + Operator API itself. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Prometheus deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlaying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Prometheus + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Prometheus + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Prometheus + deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + additionalPrinterColumns: + - jsonPath: .spec.version + description: The version of Prometheus + name: Version + type: string + - jsonPath: .spec.replicas + description: The desired replicas number of Prometheuses + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date served: true storage: true + subresources: {} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml index 3f5cb49239..5a059789a3 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml @@ -1,10 +1,9 @@ # 
https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: prometheusrules.monitoring.coreos.com spec: @@ -16,77 +15,77 @@ spec: singular: prometheusrule preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: PrometheusRule defines alerting rules for a Prometheus instance - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired alerting rule definitions for Prometheus. - properties: - groups: - description: Content of Prometheus rule file - items: - description: 'RuleGroup is a list of sequentially evaluated recording - and alerting rules. Note: PartialResponseStrategy is only used by - ThanosRuler and will be ignored by Prometheus instances. Valid - values for this field are ''warn'' or ''abort''. 
More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' - properties: - interval: - type: string - name: - type: string - partial_response_strategy: - type: string - rules: - items: - description: Rule describes an alerting or recording rule. - properties: - alert: - type: string - annotations: - additionalProperties: - type: string - type: object - expr: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - for: - type: string - labels: - additionalProperties: - type: string - type: object - record: - type: string - required: - - expr - type: object - type: array - required: - - name - - rules - type: object - type: array - type: object - required: - - spec - type: object - version: v1 versions: - name: v1 + schema: + openAPIV3Schema: + description: PrometheusRule defines alerting rules for a Prometheus instance + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: 'RuleGroup is a list of sequentially evaluated recording + and alerting rules. Note: PartialResponseStrategy is only used by + ThanosRuler and will be ignored by Prometheus instances. 
Valid + values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' + properties: + interval: + type: string + name: + type: string + partial_response_strategy: + type: string + rules: + items: + description: Rule describes an alerting or recording rule. + properties: + alert: + type: string + annotations: + additionalProperties: + type: string + type: object + expr: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + type: string + labels: + additionalProperties: + type: string + type: object + record: + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml index e631c2c090..2177a97072 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: servicemonitors.monitoring.coreos.com spec: @@ -16,445 +15,445 @@ spec: singular: servicemonitor preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: ServiceMonitor defines monitoring for a set of services. 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired Service selection for target discovery - by Prometheus. - properties: - endpoints: - description: A list of endpoints allowed as part of this ServiceMonitor. - items: - description: Endpoint defines a scrapeable endpoint serving Prometheus - metrics. - properties: - basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over - basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' - properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceMonitor defines monitoring for a set of services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Service selection for target discovery + by Prometheus. + properties: + endpoints: + description: A list of endpoints allowed as part of this ServiceMonitor. + items: + description: Endpoint defines a scrapeable endpoint serving Prometheus + metrics. + properties: + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over + basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenFile: + description: File to read bearer token for scraping targets. 
+ type: string + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping + targets. The secret needs to be in the same namespace as the + service monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + type: array + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the service port this endpoint refers to. + Mutually exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. 
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. 
type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - type: object - bearerTokenFile: - description: File to read bearer token for scraping targets. - type: string - bearerTokenSecret: - description: Secret to mount to read bearer token for scraping - targets. The secret needs to be in the same namespace as the - service monitor and accessible by the Prometheus Operator. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. - type: boolean - honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. - type: boolean - interval: - description: Interval at which metrics should be scraped - type: string - metricRelabelings: - description: MetricRelabelConfigs to apply to samples before ingestion. - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: Name or number of the pod port this endpoint refers + to. 
Mutually exclusive with port. + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. 
+ keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string type: object + type: object + type: array + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects + are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string type: array - params: - additionalProperties: - items: - type: string - type: array - description: Optional HTTP URL parameters - type: object - path: - description: HTTP path to scrape for metrics. - type: string - port: - description: Name of the service port this endpoint refers to. - Mutually exclusive with targetPort. - type: string - proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. - type: string - relabelings: - description: 'RelabelConfigs to apply to samples before scraping. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + type: object + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod + onto the target. 
+ items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Endpoints objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' + key: + description: key is the label key that the selector applies + to. type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. 
+ values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string + required: + - key + - operator type: object type: array - scheme: - description: HTTP scheme to use for scraping. - type: string - scrapeTimeout: - description: Timeout after which the scrape is ended - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: Name or number of the pod port this endpoint refers - to. Mutually exclusive with port. - x-kubernetes-int-or-string: true - tlsConfig: - description: TLS configuration to use when scraping the endpoint - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. - type: string - cert: - description: Struct containing the client cert file for the - targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for the - targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - serverName: - description: Used to verify the hostname for the targets. - type: string + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object - type: array - jobLabel: - description: The label to use to retrieve the job name from. - type: string - namespaceSelector: - description: Selector to select which namespaces the Endpoints objects - are discovered from. - properties: - any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. - type: boolean - matchNames: - description: List of namespace names. - items: - type: string - type: array - type: object - podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. - items: - type: string - type: array - sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. - format: int64 - type: integer - selector: - description: Selector to select Endpoints objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. 
- type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - targetLabels: - description: TargetLabels transfers labels on the Kubernetes Service - onto the target. - items: - type: string - type: array - required: - - endpoints - - selector - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + targetLabels: + description: TargetLabels transfers labels on the Kubernetes Service + onto the target. 
+ items: + type: string + type: array + required: + - endpoints + - selector + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml index e7b935a998..f43e18004a 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: thanosrulers.monitoring.coreos.com spec: @@ -16,4711 +15,4711 @@ spec: singular: thanosruler preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: ThanosRuler defines a ThanosRuler deployment. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ThanosRuler cluster. 
- More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ThanosRuler defines a ThanosRuler deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the ThanosRuler cluster. + More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array required: - - preference - - weight + - nodeSelectorTerms type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. 
- properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. 
This array is - replaced during a strategic merge patch. - items: + matchLabels: + additionalProperties: type: string - type: array - required: - - key - - operator + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. 
- items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. + type: array + required: + - key + - operator type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. 
If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchLabels: - additionalProperties: + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some other - pod(s)). + required: + - topologyKey + type: object + type: array + type: object + type: object + alertDropLabels: + description: AlertDropLabels configure the label names which should + be dropped in ThanosRuler alerts. If `labels` field is not provided, + `thanos_ruler_replica` will be dropped in alerts by default. + items: + type: string + type: array + alertQueryUrl: + description: The external Query URL the Thanos Ruler will set in the + 'Source' field of all alerts. Maps to the '--alert.query-url' CLI + arg. + type: string + alertmanagersConfig: + description: Define configuration for connecting to alertmanager. Only + available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` + arg. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + alertmanagersUrl: + description: 'Define URLs to send alerts to Alertmanager. For Thanos + v0.10.0 and higher, AlertManagersConfig should be used instead. Note: + this field will be ignored if AlertManagersConfig is specified. Maps + to the `alertmanagers.url` arg.' + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying + operator generated containers. This can be used to allow adding an + authentication proxy to a ThanosRuler pod or to change the behavior + of an operator generated container. 
Containers described here modify + an operator generated container if they share the same name and modifications + are done via a strategic merge patch. The current container names + are: `thanos-ruler` and `rules-configmap-reloader`. Overriding containers + is entirely outside the scope of what the maintainers will support + and by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. 
The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + description: EnvVar represents an environment variable present + in a Container. properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + configMapKeyRef: + description: Selects a key of a ConfigMap. 
properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. 
Empty topologyKey is not allowed. - type: string - required: - - topologyKey type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer required: - - podAffinityTerm - - weight + - name type: object type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. 
+ configMapRef: + description: The ConfigMap to select from properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - required: - - topologyKey + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object type: array - type: object - type: object - alertDropLabels: - description: AlertDropLabels configure the label names which should - be dropped in ThanosRuler alerts. If `labels` field is not provided, - `thanos_ruler_replica` will be dropped in alerts by default. - items: - type: string - type: array - alertQueryUrl: - description: The external Query URL the Thanos Ruler will set in the - 'Source' field of all alerts. Maps to the '--alert.query-url' CLI - arg. - type: string - alertmanagersConfig: - description: Define configuration for connecting to alertmanager. Only - available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` - arg. - properties: - key: - description: The key of the secret to select from. 
Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - alertmanagersUrl: - description: 'Define URLs to send alerts to Alertmanager. For Thanos - v0.10.0 and higher, AlertManagersConfig should be used instead. Note: - this field will be ignored if AlertManagersConfig is specified. Maps - to the `alertmanagers.url` arg.' - items: - type: string - type: array - containers: - description: 'Containers allows injecting additional containers or modifying - operator generated containers. This can be used to allow adding an - authentication proxy to a ThanosRuler pod or to change the behavior - of an operator generated container. Containers described here modify - an operator generated container if they share the same name and modifications - are done via a strategic merge patch. The current container names - are: `thanos-ruler` and `rules-configmap-reloader`. Overriding containers - is entirely outside the scope of what the maintainers will support - and by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. 
- type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. properties: - key: - description: The key to select. + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. 
Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + httpGet: + description: HTTPGet specifies the http request to perform. 
properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. 
Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. 
The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. 
Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. 
- properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. 
Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. 
TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. 
+ This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - required: - - containerPort + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. 
This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string required: - - port + - devicePath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). 
SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + - mountPath + - name type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label + of origin for each alert and metric that is user created. The label + value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalPrefix: + description: The external URL the Thanos Ruler instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Thanos Ruler is not served from root of a DNS name. + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from which + Thanos Querier reads recorded rule data. Note: Currently only the + CAFile, CertFile, and KeyFile fields are supported. Maps to the ''--grpc-server-tls-*'' + CLI args.' + properties: + ca: + description: Stuct containing the CA cert to use for the targets. properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: Run container in privileged mode. 
Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + configMap: + description: ConfigMap containing data to use for the targets. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. + key: + description: The key to select. type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + secret: + description: Secret containing data to use for the targets. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + key: + description: The key of the secret to select from. Must + be a valid secret key. 
type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + caFile: + description: Path to the CA cert in the Prometheus container to + use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. 
The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + configMap: + description: ConfigMap containing data to use for the targets. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + key: + description: The key to select. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean required: - - port + - key type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + secret: + description: Secret containing data to use for the targets. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. 
- type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container + for the targets. type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. + insecureSkipVerify: + description: Disable target certificate validation. type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. - items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). 
- type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. + keyFile: + description: Path to the client key file in the Prometheus container + for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - required: - - name type: object - type: array - enforcedNamespaceLabel: - description: EnforcedNamespaceLabel enforces adding a namespace label - of origin for each alert and metric that is user created. The label - value will always be the namespace of the object that is being created. - type: string - evaluationInterval: - description: Interval between consecutive evaluations. - type: string - externalPrefix: - description: The external URL the Thanos Ruler instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Thanos Ruler is not served from root of a DNS name. 
- type: string - grpcServerTlsConfig: - description: 'GRPCServerTLSConfig configures the gRPC server from which - Thanos Querier reads recorded rule data. Note: Currently only the - CAFile, CertFile, and KeyFile fields are supported. Maps to the ''--grpc-server-tls-*'' - CLI args.' - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container to - use for the targets. - type: string - cert: - description: Struct containing the client cert file for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus container - for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. - type: string - keySecret: - description: Secret containing the client key file for the targets. + image: + description: Thanos container image URL. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - image: - description: Thanos container image URL. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the ThanosRuler configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. 
The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the ThanosRuler configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. 
If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. 
The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. 
The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. 
More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. 
Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. 
- items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. 
- properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. 
Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. 
type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. 
Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. 
To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. 
TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. 
More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + role: + description: Role is a SELinux role label that applies + to the container. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. 
- format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. 
properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. - type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + name: + description: This must match the Name of a Volume. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). 
type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string required: - - port + - mountPath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels configure the external label pairs to ThanosRuler. + If not provided, default replica label `thanos_ruler_replica` will + be added as a label and be dropped in alerts. + type: object + listenLocal: + description: ListenLocal makes the Thanos ruler listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for ThanosRuler to be configured with. + type: string + logLevel: + description: Log level for ThanosRuler to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. 
+ optional: + description: Specify whether the Secret or its key must be defined type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. - items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. 
- type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string required: - - name + - key + type: object + paused: + description: When a ThanosRuler deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata contains Labels and Annotations gets propagated + to the thanos ruler pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object type: object - type: array - labels: - additionalProperties: + portName: + description: Port name used for the pods and governing service. This + defaults to web type: string - description: Labels configure the external label pairs to ThanosRuler. - If not provided, default replica label `thanos_ruler_replica` will - be added as a label and be dropped in alerts. - type: object - listenLocal: - description: ListenLocal makes the Thanos ruler listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - logFormat: - description: Log format for ThanosRuler to be configured with. - type: string - logLevel: - description: Log level for ThanosRuler to be configured with. 
- type: string - nodeSelector: - additionalProperties: + priorityClassName: + description: Priority class assigned to the Pods type: string - description: Define which Nodes the Pods are scheduled on. - type: object - objectStorageConfig: - description: ObjectStorageConfig configures object storage in Thanos. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - paused: - description: When a ThanosRuler deployment is paused, no actions except - for deletion will be performed on the underlying objects. - type: boolean - podMetadata: - description: PodMetadata contains Labels and Annotations gets propagated - to the thanos ruler pods. - properties: - annotations: - additionalProperties: + queryConfig: + description: Define configuration for connecting to thanos query instances. + If this is defined, the QueryEndpoints field will be ignored. Maps + to the `query.config` CLI argument. Only available with thanos v0.11.0 + and higher. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - portName: - description: Port name used for the pods and governing service. This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - queryConfig: - description: Define configuration for connecting to thanos query instances. - If this is defined, the QueryEndpoints field will be ignored. Maps - to the `query.config` CLI argument. Only available with thanos v0.11.0 - and higher. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + queryEndpoints: + description: QueryEndpoints defines Thanos querier endpoints from which + to query metrics. Maps to the --query flag of thanos ruler. + items: type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - queryEndpoints: - description: QueryEndpoints defines Thanos querier endpoints from which - to query metrics. Maps to the --query flag of thanos ruler. - items: - type: string - type: array - replicas: - description: Number of thanos ruler instances to deploy. - format: int32 - type: integer - resources: - description: Resources defines the resource requirements for single - Pods. 
If not provided, no requests/limits will be set - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration ThanosRuler shall retain data for. Default - is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` - (milliseconds seconds minutes hours days weeks years). - type: string - routePrefix: - description: The route prefix ThanosRuler registers HTTP handlers for. - This allows thanos UI to be served on a sub-path. - type: string - ruleNamespaceSelector: - description: Namespaces to be selected for Rules discovery. If unspecified, - only the same namespace as the ThanosRuler object is in is used. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - ruleSelector: - description: A label selector to select which PrometheusRules to mount - for alerting and recording. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. 
- type: object - type: object - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. - properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. 
- If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. + type: array + replicas: + description: Number of thanos ruler instances to deploy. + format: int32 + type: integer + resources: + description: Resources defines the resource requirements for single + Pods. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. - items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. 
- If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. - type: string - type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Thanos Ruler Pods. - type: string - storage: - description: Storage spec to specify how storage shall be used. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + requests: + additionalProperties: type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration ThanosRuler shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + routePrefix: + description: The route prefix ThanosRuler registers HTTP handlers for. + This allows thanos UI to be served on a sub-path. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for Rules discovery. If unspecified, + only the same namespace as the ThanosRuler object is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. 
If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. - type: string + required: + - key + - operator type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A label selector to select which PrometheusRules to mount + for alerting and recording. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. - type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. 
- items: - description: PersistentVolumeClaimCondition contails details - about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string + required: + - key + - operator type: object - type: object - type: object - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. 
When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." format: int64 type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - tracingConfig: - description: TracingConfig configures tracing in Thanos. This is an - experimental feature, it may change in any upcoming release in a breaking - way. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' + level: + description: Level is SELinux level label that applies to the + container. type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + role: + description: Role is a SELinux role label that applies to the + container. 
type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' + type: + description: Type is a SELinux type label that applies to the + container. type: string - diskName: - description: The Name of the data disk in the blob storage + user: + description: User is a SELinux user label that applies to the + container. type: string - diskURI: - description: The URI the data disk in the blob storage + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Thanos Ruler Pods. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - readOnly: - description: Defaults to false (read/write). 
ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - shareName: - description: Share Name + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - required: - - secretName - - shareName type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: - type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - readOnly: - description: 'Optional: Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. type: string type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. 
If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: array + capacity: + additionalProperties: type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. 
Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". + lastProbeTime: + description: Last time we probed the condition. + format: date-time type: string - fieldPath: - description: Path of the field to select in the - specified API version. + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 encoded. The - first item of the relative path must not start with - ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + message: + description: Human-readable message indicating details + about last transition. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. type: string - resource: - description: 'Required: resource to select' + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type type: string required: - - resource + - status + - type type: object - required: - - path - type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. 
+ type: string + type: object type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' - items: + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. 
By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is an + experimental feature, it may change in any upcoming release in a breaking + way. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. - type: string - options: - additionalProperties: + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' 
type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. 
More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + diskName: + description: The Name of the data disk in the blob storage type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
+ diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: list of volume projections + type: array + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. 
Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: - description: Projection that may be projected along with - other supported volume types + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. 
If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. 
- Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. 
- type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. - type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. 
This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. 
Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. 
This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' properties: - key: - description: The key to project. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. 
More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. 
This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. 
The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. + As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. 
+ type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. 
- type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. - type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - type: object - status: - description: 'Most recent observed status of the ThanosRuler cluster. Read-only. - Not included when requesting from the apiserver, only from the ThanosRuler - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this ThanosRuler deployment. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlying managed - objects are being performed. Only delete actions will be performed. 
- type: boolean - replicas: - description: Total number of non-terminated pods targeted by this ThanosRuler - deployment (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this ThanosRuler - deployment. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this ThanosRuler - deployment that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. 
This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. 
+ type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the ThanosRuler cluster. Read-only. + Not included when requesting from the apiserver, only from the ThanosRuler + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this ThanosRuler deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this ThanosRuler + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this ThanosRuler + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this ThanosRuler + deployment that have the desired version spec. 
+ format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml index 97214ca3ea..39ba1f1144 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -1,5 +1,5 @@ {{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: {{ template "prometheus-operator.fullname" . 
}}-admission @@ -28,4 +28,8 @@ webhooks: namespace: {{ $.Release.Namespace }} name: {{ template "prometheus-operator.operator.fullname" $ }} path: /admission-prometheusrules/mutate + timeoutSeconds: {{ .Values.prometheusOperator.admissionWebhooks.timeoutSeconds }} + admissionReviewVersions: ["v1beta1", "v1"] + sideEffects: None + {{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml index 6616f212d7..9370049724 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml @@ -1,5 +1,5 @@ {{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: name: {{ template "prometheus-operator.fullname" . 
}}-admission @@ -28,4 +28,8 @@ webhooks: namespace: {{ $.Release.Namespace }} name: {{ template "prometheus-operator.operator.fullname" $ }} path: /admission-prometheusrules/validate + timeoutSeconds: {{ .Values.prometheusOperator.admissionWebhooks.timeoutSeconds }} + admissionReviewVersions: ["v1beta1", "v1"] + sideEffects: None + {{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml deleted file mode 100755 index d6bca7ed58..0000000000 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml +++ /dev/null @@ -1,6 +0,0 @@ -{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}} -{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }} -{{ $.Files.Get $path }} ---- -{{- end }} -{{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml index 70b4126e5a..7617ad5517 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml @@ -1068,7 +1068,7 @@ prometheusOperator: enabled: true # If true prometheus operator will create and update its CRDs on startup - manageCrds: true + manageCrds: false tlsProxy: enabled: true @@ -1090,8 +1090,8 @@ prometheusOperator: patch: enabled: true image: - repository: jettech/kube-webhook-certgen - tag: v1.2.0 + repository: registry.k8s.io/ingress-nginx/kube-webhook-certgen + tag: v1.3.0 pullPolicy: IfNotPresent resources: {} ## Provide a priority class name to the webhook patching job diff --git a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml index c1ef9fde18..f891cea457 100755 --- 
a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ template "prometheus-redis-exporter.fullname" . }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml index 6b960a603b..99e4afe4fb 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "prometheus-redis-exporter.fullname" . 
}} From 65e333d94058afe4f38294ec4056329ec5ba519f Mon Sep 17 00:00:00 2001 From: saiakhil Date: Thu, 9 Feb 2023 12:08:34 +0530 Subject: [PATCH 192/203] added UCI vars in private_repo template --- .../ansible/inventory/dev/Core/secrets.yml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 8bf07e91cd..97745c7d4a 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -205,3 +205,29 @@ graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # ------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 33 +# example: hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9 +uci_api_admin_token: "hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9" + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 43 +# example: ZpfFgp75ncgs7w9rp96rMYzSFvr5T3H5QxkvsvQzmPM +hasura_graphql_admin_secret: "ZpfFgp75ncgs7w9rp96rMYzSFvr5T3H5QxkvsvQzmPM" + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 57 +# example: dZfGV6x2MLAsJTvbw87tZyhfqNgnawHz9LYrespT6WMJkmtLYLbmKVCRA +fusionauth_service_admin_key: "dZfGV6x2MLAsJTvbw87tZyhfqNgnawHz9LYrespT6WMJkmtLYLbmKVCRA" + +# use password generator tool like https://passwordsgenerator.net/ (include symbols) and generate password with length 17 +# example: ^26C~\3$T~A2xs6d# +uci_encryption_key_string: "^26C~\3$T~A2xs6d#" + +# Take the value of uci_encryption_key_string variable and use below command to get base64 encrypted string 
+# command: echo -n "^26C~\3$T~A2xs6d#" | base64 +uci_encryption_key_base64: "XjI2Q35cMyRUfkEyeHM2ZCM=" + +# Give some prefered username +uci_odk_user: "admin" + +# Give some prefered password +uci_odk_password: "admin" From 98ebbae2507c70344b53c904d2936d092af55db2 Mon Sep 17 00:00:00 2001 From: saiakhil Date: Thu, 9 Feb 2023 13:52:14 +0530 Subject: [PATCH 193/203] added UCI vars in private_repo template --- private_repo/ansible/inventory/dev/Core/secrets.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 97745c7d4a..1107874126 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -202,10 +202,6 @@ cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" # Graylog graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # email server password / api token -# ------------------------------------------------------------------------------------------------------------ # -# Optional variables - Can be left blank if you dont plan to use the intended features -lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site - # use password generator tool like https://passwordsgenerator.net/ and generate password with length 33 # example: hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9 uci_api_admin_token: "hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9" @@ -231,3 +227,7 @@ uci_odk_user: "admin" # Give some prefered password uci_odk_password: "admin" + +# ------------------------------------------------------------------------------------------------------------ # +# Optional variables - Can be left blank if you dont plan to use the intended features +lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site From 94fd7bafd5348b8296ac1a6c7e546c1cb2eef099 Mon Sep 17 00:00:00 2001 From: 
Raghupathi Date: Fri, 10 Feb 2023 14:57:25 +0530 Subject: [PATCH 194/203] Correcting the Spelling Mistake (#3730) --- ansible/roles/ml-analytics-service/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index 30b61a06cd..e031d27cbb 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -42,7 +42,7 @@ - "{{ WORKDIR }}/faust_as_service" - name: Delete the virtualenv DIR - shell: "rm -rf {{ WORKDIR }}/spark_env" + shell: "rm -rf {{ WORKDIR }}/spark_venv" become: true - name: Install python virtual environment @@ -52,7 +52,7 @@ - name: Change the ownership of virtual env become: yes file: - path: "{{ WORKDIR }}/spark_env" + path: "{{ WORKDIR }}/spark_venv" state: directory owner: "{{ USER }}" group: "{{ USER }}" From ad32ded11c06ff7c1ec0914fe794b52d8e185b9d Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Fri, 10 Feb 2023 15:35:31 +0530 Subject: [PATCH 195/203] Updated the spark_venv permissions --- ansible/roles/ml-analytics-service/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index e031d27cbb..4e5e8251ed 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -54,6 +54,7 @@ file: path: "{{ WORKDIR }}/spark_venv" state: directory + recurse: yes owner: "{{ USER }}" group: "{{ USER }}" mode: "0755" From eb484415e99c6447a1212bcfd4151920b02d0518 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Fri, 10 Feb 2023 16:53:15 +0530 Subject: [PATCH 196/203] Changing the steps (#3732) --- .../roles/ml-analytics-service/tasks/main.yml | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml 
b/ansible/roles/ml-analytics-service/tasks/main.yml index 4e5e8251ed..6f37d6b3ea 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -48,16 +48,6 @@ - name: Install python virtual environment shell: "cd {{ WORKDIR }} && virtualenv --python=python3.8 spark_venv" become: true - -- name: Change the ownership of virtual env - become: yes - file: - path: "{{ WORKDIR }}/spark_venv" - state: directory - recurse: yes - owner: "{{ USER }}" - group: "{{ USER }}" - mode: "0755" - name: Create necessary logs folders for pipeline become: yes @@ -81,6 +71,16 @@ requirements: "{{ WORKDIR }}/ml-analytics-service/requirements.txt" extra_args: --upgrade +- name: Change the ownership of virtual env + become: yes + file: + path: "{{ WORKDIR }}/spark_venv" + state: directory + recurse: yes + owner: "{{ USER }}" + group: "{{ USER }}" + mode: "0755" + - name: Creating a faust service shell file in executable mode copy: src: files/faust.sh From b825539af442948746a6d110f6ca069434961361 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Mon, 13 Feb 2023 16:54:09 +0530 Subject: [PATCH 197/203] Issue #KN-828 chore: Added gcp related config --- .../stack-sunbird/templates/content-service_application.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index 837298ac30..02a4022f86 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -486,6 +486,9 @@ cloud_storage_key: "{{ cloud_public_storage_accountname }}" cloud_storage_secret: "{{ cloud_public_storage_secret }}" cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" cloud_storage_container: "{{ cloud_storage_content_bucketname }}" +cloud_storage_client_id: "{{ cloud_public_storage_client_id }}" 
+cloud_storage_private_key_id: "{{ cloud_public_storage_private_key_id }}" +cloud_storage_project_id: "{{ cloud_public_storage_project }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" From 302880a9780c5fc2629a5ba88a7d51b595b51d43 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Mon, 13 Feb 2023 16:57:23 +0530 Subject: [PATCH 198/203] Issue #KN-828 chore: Added gcp related config --- .../templates/content-service_application.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index 02a4022f86..dc90131a1b 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -486,9 +486,9 @@ cloud_storage_key: "{{ cloud_public_storage_accountname }}" cloud_storage_secret: "{{ cloud_public_storage_secret }}" cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" cloud_storage_container: "{{ cloud_storage_content_bucketname }}" -cloud_storage_client_id: "{{ cloud_public_storage_client_id }}" -cloud_storage_private_key_id: "{{ cloud_public_storage_private_key_id }}" -cloud_storage_project_id: "{{ cloud_public_storage_project }}" +cloud_storage_client_id: "{{ cloud_public_storage_client_id | default('') }}" +cloud_storage_private_key_id: "{{ cloud_public_storage_private_key_id | default('') }}" +cloud_storage_project_id: "{{ cloud_public_storage_project | default('') }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" From f4fac28f61ded3938c417afeebae781863ad2766 Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Wed, 15 Feb 2023 11:12:37 +0530 Subject: [PATCH 199/203] added jenkins-mobile-slave-setup.sh (#3734) --- deploy/jenkins/jenkins-mobile-slave-setup.sh | 47 
++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 deploy/jenkins/jenkins-mobile-slave-setup.sh diff --git a/deploy/jenkins/jenkins-mobile-slave-setup.sh b/deploy/jenkins/jenkins-mobile-slave-setup.sh new file mode 100644 index 0000000000..38e8369bba --- /dev/null +++ b/deploy/jenkins/jenkins-mobile-slave-setup.sh @@ -0,0 +1,47 @@ +#!/bin/bash +bold=$(tput bold) +normal=$(tput sgr0) + +echo -e "\n\e[0;32m${bold}Updating the apt repo${normal}\n" +apt update + +echo -e "\n\e[0;32m${bold}Installating JDK11${normal}\n" +apt install -y openjdk-11-jdk + +echo -e "\n\e[0;32m${bold}Installating Git ${normal}" +apt install -y git + +echo -e "\n\e[0;32m${bold}Installating zip unzip${normal}" +apt install -y unzip zip + +echo -e "\n\e[0;32m${bold}Installating JQ${normal}" +apt install -y jq + +echo -e "\n\e[0;32m${bold}Installating Gradle-6.5.1${normal}" +wget -O gradle-6.5.1.zip https://services.gradle.org/distributions/gradle-6.5.1-all.zip +unzip -q gradle-6.5.1.zip +mkdir -p /usr/lib/gradle +mv gradle-6.5.1 6.5.1 +sudo mv 6.5.1 /usr/lib/gradle/ + +echo -e "\n\e[0;32m${bold}Installating Gradle-7.4.1${normal}" +wget -O gradle-7.4.1.zip 'https://services.gradle.org/distributions/gradle-7.4.1-all.zip' +unzip -q gradle-7.4.1.zip +mkdir -p /opt/gradle +mv gradle-7.4.1 /opt/gradle/ + +echo -e "\n\e[0;32m${bold}Installating node" +wget https://nodejs.org/download/release/v12.20.0/node-v12.20.0-linux-x64.tar.gz +tar -xvf node-v12.20.0-linux-x64.tar.gz +mv node-v12.20.0-linux-x64 /usr/local/lib/ +ln -s /usr/local/lib/node-v12.20.0-linux-x64/bin/node /usr/bin/node +ln -s /usr/local/lib/node-v12.20.0-linux-x64/bin/npm /usr/bin/npm + +echo -e "\n\e[0;32m${bold}Installating node modules" +npm install -g ionic +npm install -g cordova@10.0.0 +npm install -g cordova-res +ln -s /usr/local/lib/node-v12.20.0-linux-x64/bin/ionic /usr/bin/ionic +ln -s /usr/local/lib/node-v12.20.0-linux-x64/bin/cordova /usr/bin/cordova + +echo -e "\n\e[0;32m${bold}Jenkins slave installation 
complete..${normal}" \ No newline at end of file From af5779530b43a3770f3477e6144bb9597d89d773 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:12:27 +0530 Subject: [PATCH 200/203] Fix: Release-5.1.0 installation issues (#3747) * Fix azcopy cmd execute issue * Update Learner service storage vars * Add new jenkins vars for KP and DP repo * Add adminutil_learner_api_key consumer api permissions --- ansible/roles/kong-consumer/defaults/main.yml | 3 +++ .../templates/sunbird_learner-service.env | 10 +++++----- deploy/jenkins/jenkins-server-setup.sh | 1 + deploy/jenkins/jenkins.yaml | 4 ++++ .../dev/jobs/DataPipeline/jobs/AdhocScripts/config.xml | 2 +- .../dev/jobs/DataPipeline/jobs/Analytics/config.xml | 2 +- .../jobs/DataPipeline/jobs/AnalyticsCore/config.xml | 2 +- .../dev/jobs/DataPipeline/jobs/ApiModule/config.xml | 2 +- .../jobs/DataPipeline/jobs/CoreDataProducts/config.xml | 2 +- .../dev/jobs/DataPipeline/jobs/DataReplay/config.xml | 2 +- .../DataPipeline/jobs/DruidAnomalyDetection/config.xml | 2 +- .../jobs/dev/jobs/DataPipeline/jobs/ETLJobs/config.xml | 2 +- .../jobs/DataPipeline/jobs/EdDataProducts/config.xml | 2 +- .../DataPipeline/jobs/FlinkPipelineJobs/config.xml | 2 +- .../dev/jobs/DataPipeline/jobs/MLWorkbench/config.xml | 2 +- .../jobs/dev/jobs/DataPipeline/jobs/Secor/config.xml | 2 +- .../KnowledgePlatform/jobs/CassandraTrigger/config.xml | 2 +- .../jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml | 2 +- .../jobs/KnowledgePlatform/jobs/Learning/config.xml | 2 +- .../dev/jobs/KnowledgePlatform/jobs/Neo4j/config.xml | 2 +- .../jobs/KnowledgePlatform/jobs/SyncTool/config.xml | 2 +- .../dev/jobs/KnowledgePlatform/jobs/Yarn/config.xml | 2 +- 22 files changed, 31 insertions(+), 23 deletions(-) diff --git a/ansible/roles/kong-consumer/defaults/main.yml b/ansible/roles/kong-consumer/defaults/main.yml index 9acffb2f50..a28f773309 100644 --- a/ansible/roles/kong-consumer/defaults/main.yml +++ 
b/ansible/roles/kong-consumer/defaults/main.yml @@ -68,6 +68,9 @@ anonymous_user_groups: - anonymousAppAccess - anonymousUserCreate +userAccess: + - userAccess + kong_all_consumer_groups: - announcementAccess - appAccess diff --git a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env index 3b6a3f122e..c8b8d9aca9 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env @@ -28,8 +28,8 @@ sunbird_mail_server_password={{sunbird_mail_server_password}} sunbird_mail_server_from_email={{sunbird_mail_server_from_email}} sunbird_encryption_key={{sunbird_encryption_key}} sunbird_encryption_mode={{sunbird_encryption_mode}} -sunbird_account_name={{sunbird_public_storage_account_name}} -sunbird_account_key={{sunbird_public_storage_account_key}} +sunbird_account_name={{cloud_public_storage_accountname}} +sunbird_account_key={{cloud_private_storage_secret}} sunbird_quartz_mode={{sunbird_sunbird_quartz_mode}} sunbird_env_logo_url={{sunbird_env_logo_url}} sunbird_web_url={{sunbird_web_url}} @@ -79,7 +79,7 @@ sunbird_course_batch_notification_enabled={{sunbird_course_batch_notification_en sunbird_course_batch_notification_signature={{sunbird_course_batch_notification_signature}} sunbird_otp_expiration={{sunbird_otp_expiration}} sunbird_otp_length={{sunbird_otp_length}} -sunbird_content_azure_storage_container={{sunbird_content_azure_storage_container}} +sunbird_content_azure_storage_container={{cloud_storage_content_bucketname}} # Release-1.14 sunbird_time_zone={{sunbird_time_zone}} # Release-1.15 @@ -87,8 +87,8 @@ sunbird_health_check_enable={{sunbird_health_check_enable}} sunbird_keycloak_user_federation_provider_id={{core_vault_sunbird_keycloak_user_federation_provider_id}} sunbird_gzip_enable={{sunbird_gzip_enable}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} 
-sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} -sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} +sunbird_analytics_blob_account_name={{cloud_private_storage_accountname}} +sunbird_analytics_blob_account_key={{cloud_private_storage_secret}} # Optional for caching sunbird_cache_enable={{sunbird_cache_enable | default(false)}} # Set below variables if above true diff --git a/deploy/jenkins/jenkins-server-setup.sh b/deploy/jenkins/jenkins-server-setup.sh index 670395335b..d96e3b4228 100755 --- a/deploy/jenkins/jenkins-server-setup.sh +++ b/deploy/jenkins/jenkins-server-setup.sh @@ -89,6 +89,7 @@ apt update wget https://aka.ms/downloadazcopy-v10-linux tar -xf downloadazcopy-v10-linux cp ./azcopy_linux_amd64_*/azcopy /usr/bin/ +chmod +x /usr/bin/azcopy rm -rf downloadazcopy-v10-linux* azcopy_linux_amd* ### diff --git a/deploy/jenkins/jenkins.yaml b/deploy/jenkins/jenkins.yaml index 5f4b9523f0..6833b9be97 100644 --- a/deploy/jenkins/jenkins.yaml +++ b/deploy/jenkins/jenkins.yaml @@ -46,6 +46,10 @@ jenkins: value: "${GH_PRIVATE_REPO_URL}" - key: public_repo_branch value: "${GH_PUBLIC_REPO_BRANCH}" + - key: kp_public_repo_branch + value: "${KP_PUBLIC_REPO_BRANCH}" # branch of https://github.com/project-sunbird/sunbird-learning-platform + - key: dp_public_repo_branch + value: "${DP_PUBLIC_REPO_BRANCH}" # branch of https://github.com/project-sunbird/sunbird-data-pipeline.git - key: override_private_branch value: "true" - key: override_public_branch diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AdhocScripts/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AdhocScripts/config.xml index 9a042704ae..8b38850fbf 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AdhocScripts/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AdhocScripts/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a 
copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Analytics/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Analytics/config.xml index b4458fb7cd..a43fba3f1a 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Analytics/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Analytics/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AnalyticsCore/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AnalyticsCore/config.xml index 6de1a96449..fbc389f9d2 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AnalyticsCore/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/AnalyticsCore/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ApiModule/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ApiModule/config.xml index af4862b0f2..af16918b5d 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ApiModule/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ApiModule/config.xml @@ -86,7 +86,7 @@ ArtifactRepo - Push the docker image to container registry. 
- ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/CoreDataProducts/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/CoreDataProducts/config.xml index f93395f5e0..5951bbf3c0 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/CoreDataProducts/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/CoreDataProducts/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DataReplay/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DataReplay/config.xml index fa45925735..ac8b00eeec 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DataReplay/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DataReplay/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DruidAnomalyDetection/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DruidAnomalyDetection/config.xml index a0fd3c44f9..6e6adfc2e7 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DruidAnomalyDetection/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/DruidAnomalyDetection/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ETLJobs/config.xml 
b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ETLJobs/config.xml index bd031ec623..0629551eab 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ETLJobs/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/ETLJobs/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/EdDataProducts/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/EdDataProducts/config.xml index 6e67c63781..fc0724ce71 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/EdDataProducts/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/EdDataProducts/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml index 78602a563a..045ac15dd5 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Push the docker image to container registry. 
- ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/MLWorkbench/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/MLWorkbench/config.xml index 590eab1e26..d083cc3b47 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/MLWorkbench/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/MLWorkbench/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Push the docker image to container registry. - ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Secor/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Secor/config.xml index d39e2833b0..15dbf5cc52 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Secor/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/DataPipeline/jobs/Secor/config.xml @@ -86,7 +86,7 @@ ArtifactRepo - Push the docker image to container registry. 
- ${public_repo_branch} + ${dp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/CassandraTrigger/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/CassandraTrigger/config.xml index f954ac1cbe..4b0d6ba729 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/CassandraTrigger/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/CassandraTrigger/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${kp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml index 1aa83cd9de..5df283b12c 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Push the docker image to container registry. 
- ${public_repo_branch} + ${kp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Learning/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Learning/config.xml index 6611070f09..5422f59573 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Learning/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Learning/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${kp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Neo4j/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Neo4j/config.xml index fd7ef60977..24395bb70a 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Neo4j/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Neo4j/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${kp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/SyncTool/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/SyncTool/config.xml index 6a9af8f80c..65cf0a8657 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/SyncTool/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/SyncTool/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${kp_public_repo_branch} false diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Yarn/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Yarn/config.xml 
index ab7ab56194..eb90f787aa 100644 --- a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Yarn/config.xml +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/KnowledgePlatform/jobs/Yarn/config.xml @@ -85,7 +85,7 @@ ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - J - ${public_repo_branch} + ${kp_public_repo_branch} false From ef1f7d62df393119976fc1301db204c36b0738cb Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Tue, 28 Feb 2023 11:58:07 +0530 Subject: [PATCH 201/203] Sunbird installation issue fixes (#3746) * removed questionset-publish from KP flink job list * updated keycloak provision role * removed enc-service upstream * changed the order of mail server vars * updated the order of graylog and log-es hosts --- .../roles/keycloak-provision/tasks/main.yml | 11 ++++++++ .../jobs/FlinkJobs/config.xml | 1 - .../core/nginx-public-ingress/values.j2 | 6 +--- .../ansible/inventory/dev/Core/common.yml | 28 +++++++++---------- private_repo/ansible/inventory/dev/Core/hosts | 24 ++++++++-------- 5 files changed, 38 insertions(+), 32 deletions(-) diff --git a/ansible/roles/keycloak-provision/tasks/main.yml b/ansible/roles/keycloak-provision/tasks/main.yml index bc8c3a581b..1020348520 100644 --- a/ansible/roles/keycloak-provision/tasks/main.yml +++ b/ansible/roles/keycloak-provision/tasks/main.yml @@ -27,3 +27,14 @@ with_items: - zip - unzip + +- name: Install pip2 + apt: + name: python-pip + state: present + become: true + +- name: Install Certifi python package supported to python2 + pip: + name: certifi==2018.1.18 + become: true diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml index 9a0134703e..ea3f456189 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml +++ 
b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml @@ -99,7 +99,6 @@ return """<b>This parameter is not used</b>""" 'azure_cli', 'aws_cli', 'gcloud_cli', -'oci_cli' +'oci_cli', 'all'] true diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index 9732e74960..a19745724f 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -791,7 +791,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$offline_bucket/$url_full; + proxy_pass $offline_bucket/$url_full; } # compression for svg certs download location /api/certreg/v2/certs/download {