diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..debc906 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +__pycache__/ +venv*/ + +*.pyc +butane \ No newline at end of file diff --git a/main.py b/main.py index ff2ab07..95c26c4 100644 --- a/main.py +++ b/main.py @@ -24,9 +24,12 @@ logger = logging.getLogger() handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.DEBUG) -handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) +handler.setFormatter(logging.Formatter('%(levelname)s:\t%(message)s')) logger.addHandler(handler) +TEMPLATES_DIRPATH = '/app/templates' +BLUEPRINT_FILEPATH = '/app/templates/__blueprints.yaml' + class Param(BaseModel): name: str = Field(title='The name of the template to be used to generate the ignition manifest', max_length=100) model_config: str|int|dict = ConfigDict(extra='allow') @@ -36,6 +39,34 @@ class Blueprint(BaseModel): template: str = Field(title='The path of the template to be used with the blueprint', max_length=100) model_config: str|int|dict = ConfigDict(extra='allow') +def _ignition_generation(butane_config_filename): + cmd = f'butane --files-dir {TEMPLATES_DIRPATH} --strict --check {butane_config_filename}'.split() + run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + msg = { + 'message': f'Submitted butane config not valid', + 'error': f'{run.stdout.decode("utf-8")}'.split('\n'), + } + + if run.stdout: + raise SyntaxError(msg) + + from random import choice as r_choice + from string import ascii_letters + + ignition_config_filename = f'/tmp/{''.join(r_choice(ascii_letters) for _ in range(10))}' + + cmd = f'butane --files-dir {TEMPLATES_DIRPATH} --strict --pretty --raw {butane_config_filename} --output {ignition_config_filename}'.split() + run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + msg = run.stdout.decode("utf-8") + + if run.stdout: + raise Warning(msg) + + with open(ignition_config_filename) as i: + res = i.read() + + return 
res + description = """ ## The need @@ -72,15 +103,12 @@ def home() -> JSONResponse: @app.post('/configs', status_code=202, tags=['Configurations'], description='Generate an Ignition config from parameters query') def generate_config(param: Param,) -> JSONResponse: - out = '' - try: template_name = param.name template_path = f'./{template_name}.yaml' template_source = env.loader.get_source(env, template_path) parsed_content = env.parse(template_source) fields = meta.find_undeclared_variables(parsed_content) - DynamicParamsModel = create_model('DynamicParamsModel', **{field: (Any, ...) for field in fields if field != 'name'}, __base__=Param) values = DynamicParamsModel(**param.model_dump()) template = env.get_template(template_path) @@ -90,20 +118,25 @@ def generate_config(param: Param,) -> JSONResponse: with open(td, 'w') as t: t.write(result) - cmd = f'butane -d templates --strict {n}'.split() - run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - out = run.stdout.decode("utf-8") + out = _ignition_generation(n) Path(n).unlink(missing_ok=True) return JSONResponse( status_code=status.HTTP_202_ACCEPTED, content=json.loads(out) ) + except SyntaxError as e: + logger.info(e) + + return JSONResponse( + status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, + content={'details': e.msg,} + ) except ValidationError as e: logger.info(e) return JSONResponse( - status_code = status.HTTP_422_UNPROCESSABLE_ENTITY, + status_code = status.HTTP_510_NOT_EXTENDED, content={'details': json.loads(e.json()),} ) except TemplateNotFound as e: @@ -114,31 +147,22 @@ def generate_config(param: Param,) -> JSONResponse: content={'details': f'Template {template_name} not found',} ) except Exception as e: - if out: - logger.info(out) - - return JSONResponse( - status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, - content={'details': f'Invalid Butane template submitted: {out}',} - ) - else: - logger.exception(e) + logger.exception(e) - return JSONResponse( - 
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} - ) + return JSONResponse( + status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + ) -@app.get('/configs/{blueprint_id}', status_code=200, tags=['Configurations'], description='Generate an Ignition config from a predefinied blueprint') +@app.get('/configs/{blueprint_id}', status_code=200, tags=['Configurations'], description='Generate an Ignition config from a predefined blueprint') def get_config(blueprint_id: str,) -> JSONResponse: blueprint = '' - out = '' try: - with open('/app/templates/blueprints.yaml') as b: + with open(BLUEPRINT_FILEPATH) as b: blueprints = yaml.safe_load(b.read()) + blueprint = blueprints[blueprint_id] - blueprint = blueprints[blueprint_id] template = env.get_template(blueprint['template']) result = template.render(blueprint) td, n = mkstemp(text=True) @@ -146,15 +170,20 @@ def get_config(blueprint_id: str,) -> JSONResponse: with open(td, 'w') as t: t.write(result) - cmd = f'butane -d templates --strict {n}'.split() - run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - out = run.stdout.decode("utf-8") + out = _ignition_generation(n) Path(n).unlink(missing_ok=True) return JSONResponse( status_code=status.HTTP_200_OK, content=json.loads(out) ) + except SyntaxError as e: + logger.info(e) + + return JSONResponse( + status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, + content={'details': e.msg,} + ) except KeyError as e: status_code = status.HTTP_404_NOT_FOUND @@ -176,7 +205,7 @@ def get_config(blueprint_id: str,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Blueprints 
file /app/templates/blueprints.yaml not found',} + content={'details': f'Mandatory Blueprints file {BLUEPRINT_FILEPATH} not found',} ) except TemplateNotFound as e: logger.info(e) @@ -186,25 +215,17 @@ def get_config(blueprint_id: str,) -> JSONResponse: content={'details': f'Template {blueprint['template']} not found',} ) except Exception as e: - if out: - logger.info(out) - - return JSONResponse( - status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, - content={'details': f'Invalid Butane template submitted: {out}',} - ) - else: - logger.exception(e) + logger.exception(e) - return JSONResponse( - status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} - ) + return JSONResponse( + status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + ) @app.get('/blueprints/{blueprint_id}', status_code=200, tags=['Blueprints'], description='Get one specific blueprint') def get_blueprint(blueprint_id: str,) -> JSONResponse: try: - with open('/app/templates/blueprints.yaml') as b: + with open(BLUEPRINT_FILEPATH) as b: blueprints = yaml.safe_load(b.read()) blueprint = blueprints[blueprint_id] @@ -217,7 +238,7 @@ def get_blueprint(blueprint_id: str,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Blueprints file /app/templates/blueprints.yaml not found',} + content={'details': f'Mandatory Blueprints file {BLUEPRINT_FILEPATH} not found',} ) except KeyError as e: status_code = status.HTTP_404_NOT_FOUND @@ -232,13 +253,13 @@ def get_blueprint(blueprint_id: str,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal 
Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.get('/blueprints', status_code=200, tags=['Blueprints'], description='Get available blueprints list') def get_blueprints() -> JSONResponse: try: - with open('/app/templates/blueprints.yaml') as b: + with open(BLUEPRINT_FILEPATH) as b: blueprints = yaml.safe_load(b.read()) return JSONResponse( @@ -250,20 +271,20 @@ def get_blueprints() -> JSONResponse: return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Blueprints file /app/templates/blueprints.yaml not found',} + content={'details': f'Mandatory Blueprints file {BLUEPRINT_FILEPATH} not found',} ) except Exception as e: logger.exception(e) return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.post('/blueprints', status_code=201, tags=['Blueprints'], description='Add a new blueprint') def add_blueprint(blueprint: Blueprint,) -> JSONResponse: try: - with open('/app/templates/blueprints.yaml') as b: + with open(BLUEPRINT_FILEPATH) as b: blueprints = yaml.safe_load(b.read()) if blueprint.name in blueprints: @@ -271,7 +292,7 @@ def add_blueprint(blueprint: Blueprint,) -> JSONResponse: blueprints[blueprint.name] = {k:v for k,v in blueprint.model_dump().items() if k != 'name'} - with open('/app/templates/blueprints.yaml', 'w') as b: + with open(BLUEPRINT_FILEPATH, 'w') as b: yaml.dump(blueprints, b) return JSONResponse( @@ -283,14 
+304,14 @@ def add_blueprint(blueprint: Blueprint,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_409_CONFLICT, - content={'details': f'Blueprint ID {blueprint.name} laready exsists',} + content={'details': f'Blueprint ID {blueprint.name} already exists',} ) except FileNotFoundError as e: logger.info(e) return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Blueprints file /app/templates/blueprints.yaml not found',} + content={'details': f'Mandatory Blueprints file {BLUEPRINT_FILEPATH} not found',} ) except KeyError as e: status_code = status.HTTP_404_NOT_FOUND @@ -305,7 +326,7 @@ def add_blueprint(blueprint: Blueprint,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.put('/blueprints/{blueprint_id}', status_code=202, tags=['Blueprints'], description='Update an existing blueprint') @@ -316,12 +337,12 @@ def update_blueprint(blueprint_id: str, blueprint: Blueprint,) -> JSONResponse: content={'details': 'You are in read_append_only mode, you can only read and add templates/blueprints. 
Update operations are not allowed'} ) try: - with open('/app/templates/blueprints.yaml') as b: + with open(BLUEPRINT_FILEPATH) as b: blueprints = yaml.safe_load(b.read()) _, blueprints[blueprint_id] = blueprints[blueprint_id], {k:v for k,v in blueprint.model_dump().items() if k != 'name'} - with open('/app/templates/blueprints.yaml', 'w') as b: + with open(BLUEPRINT_FILEPATH, 'w') as b: yaml.dump(blueprints, b) return JSONResponse( @@ -333,7 +354,7 @@ def update_blueprint(blueprint_id: str, blueprint: Blueprint,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Blueprints file /app/templates/blueprints.yaml not found',} + content={'details': f'Mandatory Blueprints file {BLUEPRINT_FILEPATH} not found',} ) except KeyError as e: status_code = status.HTTP_404_NOT_FOUND @@ -348,7 +369,7 @@ def update_blueprint(blueprint_id: str, blueprint: Blueprint,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.get('/templates/{template_id}', status_code=200, tags=['Templates'], description='Get a specific template') @@ -358,7 +379,7 @@ def get_template(template_id: str,) -> JSONResponse: raise ValueError('Template filename should be different from Blueprint filename') return FileResponse( - path=f'/app/templates/{template_id}.yaml', + path=f'{TEMPLATES_DIRPATH}/{template_id}.yaml', filename='{template_id}.yaml' ) except ValueError as e: @@ -373,7 +394,7 @@ def get_template(template_id: str,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_501_NOT_IMPLEMENTED, - content={'details': f'Template file 
/app/templates/{template_id}.yaml not found',} + content={'details': f'Template file {TEMPLATES_DIRPATH}/{template_id}.yaml not found',} ) except KeyError as e: status_code = status.HTTP_404_NOT_FOUND @@ -388,7 +409,7 @@ def get_template(template_id: str,) -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.get('/templates', status_code=200, tags=['Templates'], description='Get available templates list') @@ -397,8 +418,8 @@ def get_templates() -> JSONResponse: return JSONResponse( status_code=status.HTTP_200_OK, content={ - 'path': '/app/templates', - 'templates': [t for t in env.list_templates() if t != 'blueprints.yaml'] + 'path': TEMPLATES_DIRPATH, + 'templates': [t for t in env.list_templates() if t != '__blueprints.yaml'] } ) except Exception as e: @@ -406,29 +427,29 @@ def get_templates() -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) @app.post('/templates', status_code=201, tags=['Templates'], description='Upload a new template file') async def add_template(template: UploadFile) -> JSONResponse: already_exists = False try: - templates = [t for t in env.list_templates() if t != 'blueprints.yaml'] + templates = [t for t in env.list_templates() if t != '__blueprints.yaml'] - if template.filename == 
'blueprints.yaml': + if template.filename == '__blueprints.yaml': raise ValueError('Template filename should be different from Blueprint filename') if template.filename in templates: already_exists = True raise ValueError(f'Template name {template.filename} already exists. try with a different one') - with open(f'/app/templates/{template.filename}', 'wb') as t: + with open(f'{TEMPLATES_DIRPATH}/{template.filename}', 'wb') as t: t.write(template.file.read()) return JSONResponse( status_code = status.HTTP_201_CREATED, content={ - 'details': f'Template /app/templates/{template.filename} successfully added', - 'templates': [t for t in env.list_templates() if t != 'blueprints.yaml'], + 'details': f'Template {TEMPLATES_DIRPATH}/{template.filename} successfully added', + 'templates': [t for t in env.list_templates() if t != '__blueprints.yaml'], } ) except ValueError as e: @@ -450,5 +471,5 @@ async def add_template(template: UploadFile) -> JSONResponse: return JSONResponse( status_code = status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'details': 'Unexecpected Internal Server Error Occured, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} + content={'details': 'Unexpected Internal Server Error Occurred, Please open an issue about this bug: https://github.com/soubinan/ignition-server/issues/new/choose',} ) diff --git a/templates/__base_metal__.yaml b/templates/__base_metal__.yaml deleted file mode 100644 index aa23a86..0000000 --- a/templates/__base_metal__.yaml +++ /dev/null @@ -1,109 +0,0 @@ -variant: fcos -version: 1.5.0 -storage: - files: - - path: /etc/vconsole.conf - mode: 0644 - contents: - inline: KEYMAP=us - - path: /etc/zincati/config.d/51-rollout-wariness.toml - contents: - inline: | - [identity] - rollout_wariness = 1.0 - - path: /etc/zincati/config.d/55-updates-strategy.toml - contents: - inline: | - [updates] - strategy = "periodic" - [updates.periodic] - time_zone = "localtime" - 
[[updates.periodic.window]] - days = [ "Sun" ] - start_time = "05:00" - length_minutes = 60 - - path: /etc/ssh/sshd_config.d/20-enable-passwords.conf - mode: 0644 - contents: - inline: | - # Fedora CoreOS disables SSH password login by default. - # Enable it. - # This file must sort before 40-disable-passwords.conf. - PasswordAuthentication yes - - path: /etc/profile.d/zz-default-editor.sh - overwrite: true - contents: - inline: | - export EDITOR=vim - links: - - path: /etc/localtime - target: /usr/share/zoneinfo/UTC -systemd: - units: - - name: provision.service - enabled: true - contents: | - [Unit] - Description=Initial System Setup (post installation) - # We run after `systemd-machine-id-commit.service` to ensure that - # `ConditionFirstBoot=true` services won't rerun on the next boot. - After=systemd-machine-id-commit.service - After=network-online.target - # We run before `zincati.service` to avoid conflicting rpm-ostree - # transactions. - Before=zincati.service - ConditionPathExists=!/var/lib/.provisioned__ - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/bin/rpm-ostree override remove docker - ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive curl vim \ - cockpit-system cockpit-ostree cockpit-podman cockpit-storaged cockpit-selinux cockpit-sosreport cockpit-networkmanager cockpit-kdump - ExecStart=/usr/bin/touch /var/lib/.provisioned__ - ExecStart=/usr/bin/systemctl --no-block reboot - - [Install] - WantedBy=multi-user.target - - name: postprovision.timer - enabled: true - contents: | - [Unit] - Description=System Post provision scheduler - ConditionPathExists=/var/lib/.provisioned__ - - [Timer] - OnBootSec=180 - Unit=postprovision.service - - [Install] - WantedBy=timers.target - - name: postprovision.service - enabled: false - contents: | - [Unit] - Description=System Post provision - # We run this after the packages have been overlaid - After=network-online.target - ConditionPathExists=/var/lib/.provisioned__ - 
ConditionPathExists=!/var/lib/.postprovisioned__ - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/bin/echo "PasswordAuthentication yes" > /etc/ssh/sshd_config.d/02-enable-passwords.conf - ExecStart=/usr/bin/systemctl try-restart sshd - ExecStart=/usr/bin/podman container runlabel INSTALL quay.io/cockpit/ws - ExecStart=/usr/bin/systemctl enable cockpit.service --now - ExecStart=/usr/bin/systemctl enable podman --now - ExecStart=/bin/touch /var/lib/.postprovisioned__ - ExecStart=/usr/bin/systemctl --no-block reboot -passwd: - users: - - name: soubinan - password_hash: $1$^NhsdQNu$ZwZjYMcaPBnYMFZ3hI33X0 - ssh_authorized_keys: - - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCh5qWa/Y4ZNh5TRMjOQQt24xDp6S3hhE9NT/DHouJpC34Oy9Ozd1SrOqzl7MFkOrrsDwAN0zuLelZ0jnt9V1MBAV7KW95yMKRAk8LjE09FO+8dFLZaGqKSQm2deEOe1V5YCMqQT82eRrDrZ2dfaRzDRqcJnT/U7xStqU5BxtUYXbKy3+WqVAuDHhq+BDYHgHFv9g9OOUkddZf4AnjSWY5WDFgMyGupeQFEpAEMSRGG9mMrka7mASN/zdarCyhTGgzqmkYU/FR0Zez5MZo4DiOxHuQjdNv0m8iVK5iE519UtTeu2IBei/VsoZd5d9FXwkNM/ieojAYs9kVeo3eJCmeYbwcLvOL5TyExHL9WREUFA5B4K5qE3E9OrXp/jUfIlEbLEa3cWDVbR+F2qYJogM/9EePF5GmFihioH8rQuHLkJJwJWRVlW+2GjWxRof93CjcDeU1Vg2Q6gIL85qVvPjxY+D38MKQAUWPI2z5/N+nu2QefOKlcBObWJZCW+Zquj80= soubinan@xps13 - groups: - - wheel - - sudo diff --git a/templates/__base_xcpng__.yaml b/templates/__base_xcpng__.yaml deleted file mode 100644 index f6a80d2..0000000 --- a/templates/__base_xcpng__.yaml +++ /dev/null @@ -1,111 +0,0 @@ -variant: fcos -version: 1.5.0 -storage: - files: - - path: /etc/vconsole.conf - mode: 0644 - contents: - inline: KEYMAP=us - - path: /etc/zincati/config.d/51-rollout-wariness.toml - contents: - inline: | - [identity] - rollout_wariness = 1.0 - - path: /etc/zincati/config.d/55-updates-strategy.toml - contents: - inline: | - [updates] - strategy = "periodic" - [updates.periodic] - time_zone = "localtime" - [[updates.periodic.window]] - days = [ "Sun" ] - start_time = "05:00" - length_minutes = 60 - - path: 
/etc/ssh/sshd_config.d/20-enable-passwords.conf - mode: 0644 - contents: - inline: | - # Fedora CoreOS disables SSH password login by default. - # Enable it. - # This file must sort before 40-disable-passwords.conf. - PasswordAuthentication yes - - path: /etc/profile.d/zz-default-editor.sh - overwrite: true - contents: - inline: | - export EDITOR=vim - links: - - path: /etc/localtime - target: /usr/share/zoneinfo/UTC -systemd: - units: - - name: provision.service - enabled: true - contents: | - [Unit] - Description=Initial System Setup (post installation) - # We run after `systemd-machine-id-commit.service` to ensure that - # `ConditionFirstBoot=true` services won't rerun on the next boot. - After=systemd-machine-id-commit.service - After=network-online.target - # We run before `zincati.service` to avoid conflicting rpm-ostree - # transactions. - Before=zincati.service - ConditionPathExists=!/var/lib/.provisioned__ - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/bin/rpm-ostree override remove docker - ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive curl vim \ - cockpit-system cockpit-ostree cockpit-podman cockpit-storaged cockpit-selinux cockpit-sosreport cockpit-networkmanager cockpit-kdump \ - xe-guest-utilities-latest - ExecStart=/usr/bin/touch /var/lib/.provisioned__ - ExecStart=/usr/bin/systemctl --no-block reboot - - [Install] - WantedBy=multi-user.target - - name: postprovision.timer - enabled: true - contents: | - [Unit] - Description=System Post provision scheduler - ConditionPathExists=/var/lib/.provisioned__ - - [Timer] - OnBootSec=180 - Unit=postprovision.service - - [Install] - WantedBy=timers.target - - name: postprovision.service - enabled: false - contents: | - [Unit] - Description=System Post provision - # We run this after the packages have been overlaid - After=network-online.target - ConditionPathExists=/var/lib/.provisioned__ - ConditionPathExists=!/var/lib/.postprovisioned__ - - [Service] - Type=oneshot 
- RemainAfterExit=yes - ExecStart=/usr/bin/echo "PasswordAuthentication yes" > /etc/ssh/sshd_config.d/02-enable-passwords.conf - ExecStart=/usr/bin/systemctl try-restart sshd - ExecStart=/usr/bin/podman container runlabel INSTALL quay.io/cockpit/ws - ExecStart=/usr/bin/systemctl enable cockpit.service --now - ExecStart=/usr/bin/systemctl enable podman --now - ExecStart=/usr/bin/systemctl enable xe-linux-distribution --now - ExecStart=/bin/touch /var/lib/.postprovisioned__ - ExecStart=/usr/bin/systemctl --no-block reboot -passwd: - users: - - name: soubinan - password_hash: $1$^NhsdQNu$ZwZjYMcaPBnYMFZ3hI33X0 - ssh_authorized_keys: - - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCh5qWa/Y4ZNh5TRMjOQQt24xDp6S3hhE9NT/DHouJpC34Oy9Ozd1SrOqzl7MFkOrrsDwAN0zuLelZ0jnt9V1MBAV7KW95yMKRAk8LjE09FO+8dFLZaGqKSQm2deEOe1V5YCMqQT82eRrDrZ2dfaRzDRqcJnT/U7xStqU5BxtUYXbKy3+WqVAuDHhq+BDYHgHFv9g9OOUkddZf4AnjSWY5WDFgMyGupeQFEpAEMSRGG9mMrka7mASN/zdarCyhTGgzqmkYU/FR0Zez5MZo4DiOxHuQjdNv0m8iVK5iE519UtTeu2IBei/VsoZd5d9FXwkNM/ieojAYs9kVeo3eJCmeYbwcLvOL5TyExHL9WREUFA5B4K5qE3E9OrXp/jUfIlEbLEa3cWDVbR+F2qYJogM/9EePF5GmFihioH8rQuHLkJJwJWRVlW+2GjWxRof93CjcDeU1Vg2Q6gIL85qVvPjxY+D38MKQAUWPI2z5/N+nu2QefOKlcBObWJZCW+Zquj80= soubinan@xps13 - groups: - - wheel - - sudo diff --git a/templates/__blueprints.yaml b/templates/__blueprints.yaml new file mode 100644 index 0000000..848b082 --- /dev/null +++ b/templates/__blueprints.yaml @@ -0,0 +1,45 @@ +controller: + template: ./controller.yaml + hostname: controller + cockpit_install: true + secrets_mount: + remote_server: 192.168.100.9 + remote_path: /mtn/SoubiDrives/VirtualizedShare/containers/.secrets + local_path: /var/mnt/shared/containers/secrets + backups_mount: + remote_server: 192.168.100.9 + remote_path: /mtn/SoubiDrives/VirtualizedShare/containers/backups + local_path: /var/mnt/shared/containers/backups + networks: + - gateway: 192.168.100.1 + interface: eno1 + ip: 192.168.100.6 + nameserver: 192.168.100.1 + prefix: 24 +controller_test: + template: 
./controller.yaml + hostname: controller_test + cockpit_install: true + secrets_mount: + remote_server: 192.168.100.9 + remote_path: /mtn/SoubiDrives/VirtualizedShare/containers/.secrets + local_path: /var/mnt/shared/containers/secrets + backups_mount: + remote_server: 192.168.100.9 + remote_path: /mtn/SoubiDrives/VirtualizedShare/containers/backups + local_path: /var/mnt/shared/containers/backups + networks: + - gateway: 192.168.90.1 + interface: enp1s0 + ip: 192.168.90.6 + nameserver: 192.168.90.1 + prefix: 24 +icognito: + template: ./icognito.yaml + hostname: icognito +mediatech: + template: ./mediatech.yaml + hostname: mediatech +base: + template: ./base.yaml + hostname: coreos diff --git a/templates/base.yaml b/templates/base.yaml new file mode 100644 index 0000000..5086f7a --- /dev/null +++ b/templates/base.yaml @@ -0,0 +1,236 @@ +{% set containers = "/var/containers" %} +{% set bindings = "/var/containers/bindings" %} +{% set backups = "/var/containers/backups" %} +variant: fcos +version: 1.5.0 +storage: + directories: + - path: {{ containers }} + - path: {{ bindings }} + - path: {{ backups }} +{% block directories %} +{% endblock directories %} + files: + - path: /etc/hostname + mode: 0644 + contents: + inline: {{ hostname | default("coreos", true)}} + - path: /etc/issue.d/50_node-readiness.issue + mode: 0644 + contents: + inline: "Readiness: provisioning started...\n" + - path: /var/tmp/50_node-readiness-ongoing.issue + mode: 0644 + contents: + inline: "Readiness: provisioning still ongoing...\n" + - path: /var/tmp/50_node-readiness-ready.issue + mode: 0644 + contents: + inline: "Readiness: ready\n" + - path: /etc/profile.d/systemd-pager.sh + mode: 0644 + contents: + inline: | + # Tell systemd to not use a pager when printing information + export SYSTEMD_PAGER=cat +{% for n in networks %} + - path: /etc/NetworkManager/system-connections/{{ n.interface }}.nmconnection + mode: 0600 + contents: + inline: | + [connection] + id={{ n.interface }} + type=ethernet 
+ interface-name={{ n.interface }} + [ipv4] + address1={{ n.ip }}/{{ n.prefix }},{{ n.gateway }} + dhcp-hostname={{ hostname }} + dns={{ n.nameserver }}; + dns-search= + may-fail=false + method=manual +{% endfor %} + - path: /etc/vconsole.conf + mode: 0644 + contents: + inline: KEYMAP=us + - path: /etc/zincati/config.d/51-rollout-wariness.toml + contents: + inline: | + [identity] + rollout_wariness = 1.0 + - path: /etc/zincati/config.d/55-updates-strategy.toml + contents: + inline: | + [updates] + strategy = "periodic" + [updates.periodic] + time_zone = "localtime" + [[updates.periodic.window]] + days = [ "Sun" ] + start_time = "05:00" + length_minutes = 60 + - path: /etc/ssh/sshd_config.d/20-enable-passwords.conf + mode: 0644 + contents: + inline: | + # Fedora CoreOS disables SSH password login by default. + # Enable it. + # This file must sort before 40-disable-passwords.conf. + PasswordAuthentication yes + - path: /etc/profile.d/zz-default-editor.sh + overwrite: true + contents: + inline: | + export EDITOR=vim +{% block files %} +{% endblock files %} + links: + - path: /etc/localtime + target: /usr/share/zoneinfo/{{ time_zone | default("UTC", true) }} +{% block links %} +{% endblock links %} +systemd: + units: + - name: provision.service + enabled: true + contents: | + [Unit] + Description=Initial System Setup (post installation) + # We run after `systemd-machine-id-commit.service` to ensure that + # `ConditionFirstBoot=true` services won't rerun on the next boot. + After=systemd-machine-id-commit.service + After=network-online.target + # We run before `zincati.service` to avoid conflicting rpm-ostree + # transactions. 
+ Before=zincati.service + ConditionPathExists=!/var/.provisioned__ + + [Service] + Type=oneshot + RemainAfterExit=yes + # No need Docker anymore :D + ExecStart=/usr/bin/rpm-ostree override remove docker + ExecStart=/usr/bin/rpm-ostree install --allow-inactive cockpit-system cockpit-ostree cockpit-podman cockpit-storaged cockpit-packagekit cockpit-selinux cockpit-sosreport cockpit-networkmanager cockpit-kdump +{% if xeguest_install %} + ExecStart=/usr/bin/rpm-ostree install --allow-inactive xe-guest-utilities-latest + ExecStart=/usr/bin/systemctl enable xe-linux-distribution --now +{% endif %} + ExecStart=/usr/bin/systemctl enable podman --now + ExecStart=/usr/bin/touch /var/.provisioned__ + ExecStart=/usr/bin/cp /var/tmp/50_node-readiness-ongoing.issue /etc/issue.d/50_node-readiness.issue + ExecStartPost=/usr/bin/systemctl --no-block reboot + + [Install] + WantedBy=multi-user.target + - name: postprovision.timer + enabled: true + contents: | + [Unit] + Description=System Post provision scheduler + ConditionPathExists=/var/.provisioned__ + + [Timer] + OnBootSec=30 + Unit=postprovision.service + + [Install] + WantedBy=timers.target + - name: postprovision.service + enabled: false + contents: | + [Unit] + Description=System Post provision + # We run this after the packages have been overlaid + After=network-online.target + ConditionPathExists=!/var/.postprovisioned__ + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/echo "PasswordAuthentication yes" > /etc/ssh/sshd_config.d/02-enable-passwords.conf + ExecStart=/usr/bin/systemctl try-restart sshd + ExecStart=/usr/bin/touch /var/.postprovisioned__ + ExecStart=/usr/bin/cp /var/tmp/50_node-readiness-ready.issue /etc/issue.d/50_node-readiness.issue + ExecStartPost=/usr/bin/systemctl --no-block reboot +{% if cockpit_install %} + - name: cockpit-ws.service + enabled: true + contents: | + [Unit] + Description=Copckpit Webservice Container + After=network-online.target + 
ConditionPathExists=/var/.provisioned__ + + [Service] + Type=oneshot + RemainAfterExit=yes + TimeoutStartSec=900 + KillMode=none + ExecStartPre=-/usr/bin/podman rm -f cockpit-ws + ExecStart=/usr/bin/podman container runlabel --name "cockpit-ws --label io.containers.autoupdate=registry -v /run/podman/podman.sock:/run/user/1001/podman/podman.sock:ro,z" RUN quay.io/cockpit/ws:latest -- -p 443 + ExecStop=/usr/bin/podman rm -f cockpit-ws + + [Install] + WantedBy=multi-user.target +{% endif %} + - name: tmpcleaner.timer + enabled: true + contents: | + [Unit] + Description=Schedule /tmp Auto-Cleaner job + ConditionPathExists=/var/.postprovisioned__ + + [Timer] + Persistent=true + OnBootSec=300 + OnUnitActiveSec=7d + Unit=tmpcleaner.service + + [Install] + WantedBy=timers.target + - name: tmpcleaner.service + enabled: false + contents: | + [Unit] + Description=/tmp Auto-Cleaner job + + [Service] + ExecStart=/usr/bin/find /tmp -type f -mmin -90 -exec rm -f {} \; + - name: podmancleaner.timer + enabled: true + contents: | + [Unit] + Description=Schedule Podman Auto prune job + ConditionPathExists=/var/.postprovisioned__ + + [Timer] + Persistent=true + OnBootSec=600 + OnUnitActiveSec=7d + Unit=podmancleaner.service + + [Install] + WantedBy=timers.target + - name: podmancleaner.service + enabled: false + contents: | + [Unit] + Description=Podman Auto prune job + + [Service] + ExecStart=/usr/bin/podman image prune -af +{% block units %} +{% endblock units %} +passwd: + users: + - name: soubinan + # Generated with: podman run -ti --rm quay.io/coreos/mkpasswd --method=yescrypt + password_hash: $y$j9T$g9YKY7yIoGzRTP9SFByZM1$SveAvxg1YDrg05tW4zXhaI9XcpBOnufKuiH7ALFUGuA + ssh_authorized_keys: + - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCh5qWa/Y4ZNh5TRMjOQQt24xDp6S3hhE9NT/DHouJpC34Oy9Ozd1SrOqzl7MFkOrrsDwAN0zuLelZ0jnt9V1MBAV7KW95yMKRAk8LjE09FO+8dFLZaGqKSQm2deEOe1V5YCMqQT82eRrDrZ2dfaRzDRqcJnT/U7xStqU5BxtUYXbKy3+WqVAuDHhq+BDYHgHFv9g9OOUkddZf4AnjSWY5WDFgMyGupeQFEpAEMSRGG9mMrka7mASN/zdarCyhTGgzqmkYU/FR0Zez5MZo4DiOxHuQjdNv0m8iVK5iE519UtTeu2IBei/VsoZd5d9FXwkNM/ieojAYs9kVeo3eJCmeYbwcLvOL5TyExHL9WREUFA5B4K5qE3E9OrXp/jUfIlEbLEa3cWDVbR+F2qYJogM/9EePF5GmFihioH8rQuHLkJJwJWRVlW+2GjWxRof93CjcDeU1Vg2Q6gIL85qVvPjxY+D38MKQAUWPI2z5/N+nu2QefOKlcBObWJZCW+Zquj80= soubinan@xps13 + groups: + - wheel + - sudo +{% block users %} +{% endblock users %} \ No newline at end of file diff --git a/templates/blueprints.yaml b/templates/blueprints.yaml deleted file mode 100644 index 590c876..0000000 --- a/templates/blueprints.yaml +++ /dev/null @@ -1,48 +0,0 @@ -controller: - containers_data_path: /var/cdata - disks: - - name: sda - partitions: - - format: xfs - label: cdata - size: 140000 - hostname: controller - network: - gateway: 192.168.100.1 - interface: eno1 - ip: 192.168.100.6 - nameserver: 192.168.100.1 - prefix: 24 - template: ./controller.yaml -controller_test: - containers_data_path: /var/cdata - disks: - - name: vda - partitions: - - format: xfs - label: cdata - size: 500 - hostname: controller_test - networks: - - gateway: 192.168.90.1 - interface: enp1s0 - ip: 192.168.90.6 - nameserver: 192.168.90.1 - prefix: 24 - template: ./controller.yaml -grabber: - hostname: grabber - template: ./grabber.yaml -metal: - template: ./__base_metal__.yaml -plex: - hostname: plex - template: ./plex.yaml -scraper: - hostname: scraper - template: ./scraper.yaml -tunnel: - hostname: tunnel - template: ./tunnel.yaml -xcpng: - template: ./__base_xcpng__.yaml diff --git a/templates/controller.yaml b/templates/controller.yaml index 4488707..c597b56 100644 --- a/templates/controller.yaml +++ b/templates/controller.yaml @@ -1,53 +1,104 @@ -variant: fcos -version: 1.5.0 -ignition: - config: - merge: - - local: 
./__base_metal__.yaml - verification: - hash: sha256-d287f9950f77e34389aeb3b05978375109b883ca29ea44bea69c1b4d4eea0517 -storage: -{% if disks %} - disks: -{% for d in disks %} - - device: /dev/{{ d.name }} - wipe_table: false -{% for p in d.partitions %} - partitions: - - label: {{ p.label }} - size_mib: {{ p.size }} - filesystems: - - path: /var/{{ p.label }} - device: /dev/disk/by-partlabel/{{ p.label }} - format: {{ p.format | default('xfs', true)}} - with_mount_unit: true -{% endfor %} -{% endfor %} -{% endif %} - files: - - path: /etc/hostname +{% extends "base.yaml" %} +{% block directories %} + - path: {{ secrets_mount.local_path }} + - path: {{ backups_mount.local_path }} + - path: {{ bindings }}/mysql + - path: {{ bindings }}/xenorchestra + - path: {{ bindings }}/cloudstack + - path: {{ bindings }}/backup +{% endblock directories %} +{% block files %} + - path: {{ bindings }}/xenorchestra/config.toml mode: 0644 contents: - inline: {{ hostname }} -{% for n in networks %} - - path: /etc/NetworkManager/system-connections/{{ n.interface }}.nmconnection - mode: 0600 + inline: | + [http] + # Public URL to connect to this XO + # + # This optional entry is used to communicate to external entities (e.g. XO Lite) + # how to connect to this XO. + # + # It SHOULD be defined in case the IP address of the current machine is not + # good enough (e.g. a domain name must be used or there is a reverse proxy). + #publicUrl = 'https://xoa.company.lan' + + # Basic HTTP. + [[http.listen]] + # Address on which the server is listening on. + # + # Sets it to 'localhost' for IP to listen only on the local host. + # + # Default: all IPv6 addresses if available, otherwise all IPv4 addresses. + hostname = '0.0.0.0' + + # Port on which the server is listening on. + # + # Default: undefined + port = 80 + + # Connection to the Redis server. 
+ [redis] + # Syntax: redis://[db[:password]@]hostname[:port][/db-number] + # + # Default: redis://localhost:6379/0 + uri = 'redis://redis:6379/0' + + # Configuration for plugins + [plugins] + # Each configuration is passed to the dedicated plugin instance + # + # Syntax: [plugins.] + - path: {{ bindings }}/mysql/cloudstack.cnf + mode: 0644 contents: inline: | - [connection] - id={{ n.interface }} - type=ethernet - interface-name={{ n.interface }} - [ipv4] - address1={{ n.ip }}/{{ n.prefix }},{{ n.gateway }} - dhcp-hostname={{ hostname }} - dns={{ n.nameserver }}; - dns-search= - may-fail=false - method=manual -{% endfor %} + [mysqld] + server-id=01 + innodb_rollback_on_timeout=1 + innodb_lock_wait_timeout=600 + max_connections=350 + log-bin=mysql-bin + binlog-format = 'ROW' + - path: {{ bindings }}/backup/common.env + mode: 0644 + contents: + inline: | + ########### BACKUP SCHEDULE + BACKUP_CRON_EXPRESSION="0 */6 * * *" + BACKUP_COMPRESSION="gz" + GZIP_PARALLELISM=1 + BACKUP_EXCLUDE_REGEXP="\.log$" + ########### BACKUP STORAGE + BACKUP_ARCHIVE="/archive" + ########### BACKUP PRUNING + BACKUP_RETENTION_DAYS="7" + BACKUP_PRUNING_LEEWAY="1m" + ########### BACKUP ENCRYPTION + # GPG_PASSPHRASE="" + ########### NOTIFICATIONS + # Notifications (email, Slack, etc.) can be sent out when a backup run finishes. + # Configuration is provided as a comma-separated list of URLs as consumed + # by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/ + # The content of such notifications can be customized. Dedicated documentation + # on how to do this can be found in the README. When providing multiple URLs or + # an URL that contains a comma, the values can be URL encoded to avoid ambiguities. + # The below URL demonstrates how to send an email using the provided SMTP + # configuration and credentials. 
+ + # NOTIFICATION_URLS=smtp://username:password@host:587/?fromAddress=sender@example.com&toAddresses=recipient@example.com + # NOTIFICATION_LEVEL="error" +{% set apps = ['redis', 'mysql', 'homarr', 'xenorchestra', 'cloudstack'] %} +{% for app in apps %} + - path: {{ bindings }}/backup/{{ app }}.env + mode: 0644 + contents: + inline: | + BACKUP_SOURCES="/backup/{{ app }}" + BACKUP_FILENAME="backup-{{ app }}-%F.%N.tar.gz" + BACKUP_LATEST_SYMLINK="backup-{{ app }}-latest.tar.gz" +{% endfor %} # Containers Networks - - path: /etc/containers/systemd/frontend.network + - path: /etc/containers/systemd/base.network mode: 0644 contents: inline: | @@ -55,87 +106,273 @@ storage: Description=Frontnet container network for externally accessible services After=network-online.target + [Network] + IPRange=10.100.1.1/24 + [Install] WantedBy=multi-user.target - - path: /etc/containers/systemd/mysql.network + # Volumes +{% set volumes = ['redis_data', 'redis_dump', 'mysql_data', 'mysql_dump', 'homarr_data', 'homarr_icons', 'homarr_configs', 'homarr_dump', 'xenorchestra_data', 'xenorchestra_backup', 'xenorchestra_dump', 'cloudstack_data', 'cloudstack_configs', 'cloudstack_dump'] %} +{% for volume in volumes %} + - path: /etc/containers/systemd/{{ volume }}.volume + mode: 0644 + contents: + inline: | + [Volume] + User=core + Group=core +{% endfor %} + # Containers + - path: /etc/containers/systemd/redis.container mode: 0644 contents: inline: | [Unit] - Description=MySQL container network - After=network-online.target + Description=Redis Container + After=network-online.target pre-start.service secrets-injector.service + ConditionPathExists=/var/.postprovisioned__ + + [Service] + TimeoutStartSec=900 + + [Container] + User=core + Group=core + ContainerName=redis + Image=docker.io/library/redis:latest + Exec=redis-server --appendonly yes + AutoUpdate=registry + HostName=redis + Network=base.network + Volume=redis_data.volume:/data:Z + Volume=redis_dump.volume:/backup:z + 
Label=docker-volume-backup.archive-pre=/bin/cp -r /data/dump.rdb /backup/redisdump.rdb [Install] WantedBy=multi-user.target - - path: /etc/containers/systemd/redis.network + - path: /etc/containers/systemd/mysql.container mode: 0644 contents: inline: | [Unit] - Description=Redis container network - After=network-online.target + Description=MySQL Container + After=network-online.target pre-start.service secrets-injector.service + ConditionPathExists=/var/.postprovisioned__ + + [Service] + TimeoutStartSec=900 + + [Container] + User=core + Group=core + ContainerName=mysql + Image=docker.io/library/mysql:5-debian + AutoUpdate=registry + HostName=mysql + Network=base.network + Volume={{ bindings }}/mysql/cloudstack.cnf:/etc/mysql/conf.d/cloudstack.cnf:ro,Z + Volume=mysql_data.volume:/var/lib/mysql:Z + Volume=mysql_dump.volume:/backup:z + Secret=mysql-root-pwd,type=env,target=MYSQL_ROOT_PASSWORD + Secret=mysql-init,type=mount,target=/docker-entrypoint-initdb.d/01_cloudstack_init.sql + Label=docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -u root --password=$MYSQL_ROOT_PASSWORD -A > /backup/mysqldump.sql' [Install] WantedBy=multi-user.target - # Containers - - path: /etc/containers/systemd/users/homarr.container + - path: /etc/containers/systemd/homarr.container mode: 0644 contents: inline: | [Unit] Description=Homarr Container - After=network-online.target + After=network-online.target pre-start.service secrets-injector.service + ConditionPathExists=/var/.postprovisioned__ [Service] TimeoutStartSec=900 [Container] + User=core + Group=core ContainerName=homarr Image=ghcr.io/ajnart/homarr:latest AutoUpdate=registry - HostName=homarr + HostName=home.lab.soubilabs.xyz PublishPort=7575:7575 - Network=frontend.network - Volume={{ containers_data_path }}/homarr/icons:/app/public/icons:Z - Volume={{ containers_data_path }}/homarr/data:/app/data/configs:Z - Volume={{ containers_data_path }}/homarr/configs:/data:Z - Volume=/var/run/docker.sock:/var/run/docker.sock + 
Network=base.network + Volume=homarr_data.volume:/data:Z + Volume=homarr_icons.volume:/app/public/icons:Z + Volume=homarr_configs.volume:/app/data/configs:Z + Volume=homarr_dump.volume:/backup:z + Volume=/run/podman/podman.sock:/var/run/docker.sock:ro,z + SecurityLabelDisable=true + Label=docker-volume-backup.archive-pre=/bin/cp -r /data /app/public/icons /app/data/configs /backup + # Label=docker-volume-backup.stop-during-backup=true [Install] WantedBy=multi-user.target - # - path: /etc/containers/systemd/xenorchestra.container - # mode: 0644 - # contents: - # inline: | - # [Unit] - # Description=XEN Orchestra Container - # After=network-online.target - # ConditionPathExists=/var/lib/.provisioned__ - # ConditionPathExists=/var/lib/.postprovisioned__ - - # [Service] - # TimeoutStartSec=900 - - # [Container] - # ExecStart=/usr/bin/rpm-ostree override remove docker - - # [Install] - # WantedBy=multi-user.target - # - path: /etc/containers/systemd/cloudstack.container - # mode: 0644 - # contents: - # inline: | - # [Unit] - # Description=CloudStack Management Container - # After=network-online.target - # ConditionPathExists=/var/lib/.provisioned__ - # ConditionPathExists=/var/lib/.postprovisioned__ - - # [Service] - # TimeoutStartSec=900 - - # [Container] - # ExecStart=/usr/bin/rpm-ostree override remove docker - - # [Install] - # WantedBy=multi-user.target + - path: /etc/containers/systemd/xenorchestra.container + mode: 0644 + contents: + inline: | + [Unit] + Description=XEN Orchestra Container + After=network-online.target pre-start.service secrets-injector.service redis.service + Wants=redis.service + ConditionPathExists=/var/.postprovisioned__ + + [Service] + TimeoutStartSec=900 + + [Container] + User=core + Group=core + ContainerName=xenorchestra + Image=ghcr.io/soubinan/xoa-container:latest + AutoUpdate=registry + HostName=xcpng.lab.soubilabs.xyz + PublishPort=8088:80 + Network=base.network + Volume={{ bindings 
}}/xenorchestra/config.toml:/etc/xo-server/config.toml:ro,Z + Volume=xenorchestra_data.volume:/var/lib/xo-server/data:Z + Volume=xenorchestra_backup.volume:/var/lib/xoa-backup:Z + Volume=xenorchestra_dump.volume:/backup:z + PodmanArgs= --privileged + Label=docker-volume-backup.archive-pre=/bin/cp -r /etc/xo-server /var/lib/xo-server/data /var/lib/xoa-backup /backup + # Label=docker-volume-backup.stop-during-backup=true + + [Install] + WantedBy=multi-user.target + - path: /etc/containers/systemd/cloudstack.container + mode: 0644 + contents: + inline: | + [Unit] + Description=Cloudstack Container + After=network-online.target pre-start.service secrets-injector.service mysql.service + Wants=mysql.service + ConditionPathExists=/var/.postprovisioned__ + + [Service] + TimeoutStartSec=900 + + [Container] + User=core + Group=core + ContainerName=cloudstack + Image=ghcr.io/soubinan/cloudstack-mgt:latest + AutoUpdate=registry + HostName=cloud.lab.soubilabs.xyz + PublishPort=8080:8080 + PublishPort=8443:8443 + PublishPort=9090:9090 + PublishPort=8250:8250 + Network=base.network + IP=10.100.1.123 + Volume=cloudstack_configs.volume:/etc/cloudstack:z + Volume=cloudstack_data.volume:/var/cloudstack:z + Secret=cloudstack-env,type=mount,target=/etc/default/cloudstack-init + PodmanArgs= --privileged + Label=docker-volume-backup.archive-pre=/bin/cp -r /etc/cloudstack /var/cloudstack /backup + # Label=docker-volume-backup.stop-during-backup=true + + [Install] + WantedBy=multi-user.target + - path: /etc/containers/systemd/backup.container + mode: 0644 + contents: + inline: | + [Unit] + Description=Container Volume Backup + After=network-online.target pre-start.service secrets-injector.service + ConditionPathExists=/var/.postprovisioned__ + + [Service] + TimeoutStartSec=900 + + [Container] + User=core + Group=core + ContainerName=backup + Image=docker.io/offen/docker-volume-backup:latest + AutoUpdate=registry + EnvironmentFile={{ bindings }}/backup/.env + # 
Volume=/run/podman/podman.sock:/var/run/docker.sock:ro,z + Volume={{ backups_mount.local_path }}:/archive:ro,z + Volume=redis_dump.volume:/backup/redis:ro,z + Volume=mysql_dump.volume:/backup/mysql:ro,z + Volume=homarr_dump.volume:/backup/homarr:ro,z + Volume=xenorchestra_dump.volume:/backup/xenorchestra:ro,z + Volume=cloudstack_dump.volume:/backup/cloudstack:ro,z + + [Install] + WantedBy=multi-user.target +{% endblock files %} +{% block units %} + - name: {{ (secrets_mount.local_path| trim("/")) | replace("/", "-") }}.mount + enabled: true + contents: | + [Unit] + Description=Mount data directory + After=network-online.target pre-start.service + + [Mount] + What={{ secrets_mount.remote_server }}:{{ secrets_mount.remote_path }} + Where={{ secrets_mount.local_path }} + Type=nfs4 + + [Install] + WantedBy=multi-user.target + - name: {{ (backups_mount.local_path| trim("/")) | replace("/", "-") }}.mount + enabled: true + contents: | + [Unit] + Description=Mount data directory + After=network-online.target pre-start.service + + [Mount] + What={{ backups_mount.server }}:{{ backups_mount.path }} + Where={{ backups_mount.local_path }} + Type=nfs4 + + [Install] + WantedBy=multi-user.target + - name: secrets-injector.service + enabled: true + contents: | + [Unit] + Description=Podman Secrets Injector Job + After=network-online.target pre-start.service {{ (secrets_mount.local_path| trim("/")) | replace("/", "-") }}.mount + Wants={{ (secrets_mount.local_path| trim("/")) | replace("/", "-") }}.mount + ConditionPathExists=/var/.postprovisioned__ + ConditionPathExists=!{{ containers }}/.secrets_ready__ + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/podman secret create mysql-root-pwd {{ secrets_mount.local_path }}/MYSQL_ROOT_PWD + ExecStart=/usr/bin/podman secret create mysql-init {{ secrets_mount.local_path }}/01_cloudstack_init.sql + ExecStart=/usr/bin/podman secret create cloudstack-env {{ secrets_mount.local_path }}/CLOUDSTACK.env + # Secrets injection 
terminated + ExecStart=/usr/bin/touch {{ containers }}/.secrets_ready__ + + [Install] + WantedBy=multi-user.target + - name: pre-start.service + enabled: true + contents: | + [Unit] + Description=Post Setup Job + After=network-online.target + ConditionPathExists=/var/.postprovisioned__ + ConditionPathExists=!/var/.pre-start__ + + [Service] + Type=oneshot + RemainAfterExit=yes + # Special config setup here + ExecStart=/usr/bin/touch /var/.pre-start__ + + [Install] + WantedBy=multi-user.target +{% endblock units %} \ No newline at end of file diff --git a/templates/plex.yaml b/templates/plex.yaml deleted file mode 100644 index 0ec8350..0000000 --- a/templates/plex.yaml +++ /dev/null @@ -1,14 +0,0 @@ -variant: fcos -version: 1.5.0 -ignition: - config: - merge: - - local: ./__base_xcpng__.yaml - verification: - hash: sha256-3e9c2d2b5ea0df86b76f3fc8d243cbf72b6e2b9ab4d19546135dcb1d4bf6be02 -storage: - files: - - path: /etc/hostname - mode: 0644 - contents: - inline: {{ hostname }} \ No newline at end of file