From 354c9fdb3ecc2da58c38a28a2b187ee7fe3c42be Mon Sep 17 00:00:00 2001
From: Martin Pitt
Date: Mon, 8 Apr 2024 14:23:21 +0200
Subject: [PATCH] Split s3 server certificate out of tasks secrets

Tasks containers don't need, and therefore should not have, the local
minio S3 server certificate. Our ci-secrets.git repo already moved the
S3 certificate out of `tasks/` into the top level `s3-server/`
directory.

Adjust the deployment scripts and integration test accordingly.
---
 .../roles/local-secrets-archive/tasks/main.yml |  2 +-
 local-s3/install-s3-service                    |  2 +-
 tasks/build-secrets                            | 14 ++++++++++++++
 test/test_deployment.py                        | 17 +++++++++++------
 4 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/ansible/roles/local-secrets-archive/tasks/main.yml b/ansible/roles/local-secrets-archive/tasks/main.yml
index 671bc826..39749ea3 100644
--- a/ansible/roles/local-secrets-archive/tasks/main.yml
+++ b/ansible/roles/local-secrets-archive/tasks/main.yml
@@ -17,4 +17,4 @@
   become: false
   run_once: yes
   shell: |
-    tar -C $XDG_RUNTIME_DIR/ci-secrets -hz --hard-dereference -c webhook s3-keys tasks > $XDG_RUNTIME_DIR/ci-secrets.tar.gz
+    tar -C $XDG_RUNTIME_DIR/ci-secrets -hz --hard-dereference -c webhook s3-keys s3-server tasks > $XDG_RUNTIME_DIR/ci-secrets.tar.gz

diff --git a/local-s3/install-s3-service b/local-s3/install-s3-service
index a4942acc..e4748958 100755
--- a/local-s3/install-s3-service
+++ b/local-s3/install-s3-service
@@ -11,7 +11,7 @@
 systemctl stop cockpit-s3.service || true

 if [ -z "${DISABLE_TLS:-}" ]; then
-    CERT_VOLS="-v $SECRETS/tasks/s3-server.key:/root/.minio/certs/private.key:ro -v $SECRETS/tasks/s3-server.pem:/root/.minio/certs/public.crt:ro"
+    CERT_VOLS="-v $SECRETS/s3-server/s3-server.key:/root/.minio/certs/private.key:ro -v $SECRETS/s3-server/s3-server.pem:/root/.minio/certs/public.crt:ro"
     PORT=443
     PROTOCOL=https
 else

diff --git a/tasks/build-secrets b/tasks/build-secrets
index c4f33e1b..d8e50e4c 100755
--- a/tasks/build-secrets
+++ b/tasks/build-secrets
@@ -36,6 +36,20 @@
 for f in $(find -maxdepth 1 -type f -o -type l); do
     printf ' %s: %s\n' "${f#./}" "$(base64 --wrap=0 $f)"
 done

+# local S3 image cache server secrets
+cat <

diff --git a/test/test_deployment.py b/test/test_deployment.py
@@ ... @@ ... Config:
     # default to dummy token, tests need to opt into real one with user_github_token
     (config.webhook / '.config--github-token').write_text('0123abc')

-    # tasks secrets
-    config.tasks = config.secrets / 'tasks'
-    config.tasks.mkdir()
-    subprocess.run(ROOT_DIR / 'local-s3/generate-s3-cert.sh', cwd=config.tasks, check=True)
+    # minio S3 certificate
+    config.s3_server = config.secrets / 's3-server'
+    config.s3_server.mkdir()
+    subprocess.run(ROOT_DIR / 'local-s3/generate-s3-cert.sh', cwd=config.s3_server, check=True)

     # minio S3 key
     config.s3_keys = config.secrets / 's3-keys'
     config.s3_keys.mkdir()
     (config.s3_keys / 'localhost.localdomain').write_text('cockpituous foobarfoo')

+    # tasks secrets: none right now, but do create an empty directory to keep production structure
+    config.tasks = config.secrets / 'tasks'
+    config.tasks.mkdir()
+
     # need to make secrets world-readable, as containers run as non-root
     subprocess.run(['chmod', '-R', 'go+rX', configdir], check=True)

@@ -139,8 +144,8 @@ def pod(config: Config, pytestconfig) -> Iterator[PodData]:
     # minio S3 store
     data.s3 = f'cockpituous-s3-{test_instance}'
     subprocess.run(['podman', 'run', '-d', '--name', data.s3, f'--pod={data.pod}', *launch_args,
-                    '-v', f'{config.tasks}/s3-server.key:/root/.minio/certs/private.key:ro',
-                    '-v', f'{config.tasks}/s3-server.pem:/root/.minio/certs/public.crt:ro',
+                    '-v', f'{config.s3_server}/s3-server.key:/root/.minio/certs/private.key:ro',
+                    '-v', f'{config.s3_server}/s3-server.pem:/root/.minio/certs/public.crt:ro',
                     '-e', 'MINIO_ROOT_USER=minioadmin', '-e', 'MINIO_ROOT_PASSWORD=minioadmin',
                     'quay.io/minio/minio', 'server', '/data', '--console-address', ':9001'],
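
For reference, a minimal sketch of the local secrets layout that the updated
tar invocation and test fixture assume. The directory names come from this
patch; the SECRETS variable and the COCKPITUOUS checkout path are illustrative
assumptions, not part of the patch:

    # hypothetical local setup, mirroring what the test fixture creates
    COCKPITUOUS=~/src/cockpituous              # assumption: path to a cockpituous checkout
    SECRETS="$XDG_RUNTIME_DIR/ci-secrets"
    mkdir -p "$SECRETS"/webhook "$SECRETS"/s3-keys "$SECRETS"/s3-server "$SECRETS"/tasks
    # the S3 server certificate now lives in s3-server/, not tasks/
    (cd "$SECRETS/s3-server" && "$COCKPITUOUS/local-s3/generate-s3-cert.sh")
    # minio credentials file, as written by the test fixture
    echo 'cockpituous foobarfoo' > "$SECRETS/s3-keys/localhost.localdomain"
    # the ansible role then archives all four directories
    tar -C "$SECRETS" -hz --hard-dereference -c webhook s3-keys s3-server tasks \
        > "$XDG_RUNTIME_DIR/ci-secrets.tar.gz"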