From 4c7f4c0dea919b56fd1e2600426fe847ef89e4a1 Mon Sep 17 00:00:00 2001
From: Adam Dyess
Date: Mon, 30 Oct 2023 13:56:37 -0500
Subject: [PATCH] Remove the need for the requests library entirely (#145)

* Remove the need for the requests library entirely

* read cuda driver deb in 1M chunk

* remove tests from being published with the layer
---
 layer.yaml                 |   4 +-
 lib/charms/layer/docker.py |  78 ++++---
 reactive/docker.py         | 405 +++++++++++++++++--------------
 tests/conftest.py          |   4 +-
 tests/test_lib_docker.py   |  40 ++--
 tox.ini                    |  20 +-
 wheelhouse.txt             |   1 -
 7 files changed, 256 insertions(+), 296 deletions(-)

diff --git a/layer.yaml b/layer.yaml
index 945071e..4351f9e 100644
--- a/layer.yaml
+++ b/layer.yaml
@@ -13,5 +13,5 @@ defines:
     default: false
 exclude:
   - LAYER_README.md
-  - tests/10-deploy.py
-  - tests/tests.yaml
+  - tests/conftest.py
+  - tests/test_lib_docker.py
diff --git a/lib/charms/layer/docker.py b/lib/charms/layer/docker.py
index 455807d..b4550fd 100644
--- a/lib/charms/layer/docker.py
+++ b/lib/charms/layer/docker.py
@@ -8,14 +8,14 @@ from charmhelpers.core.templating import render
 
 
 docker_packages = {
-    'apt': ['docker.io'],
-    'upstream': ['docker-ce'],
-    'nvidia': [
-        'docker-ce',
-        'nvidia-docker2',
-        'nvidia-container-runtime',
-        'nvidia-container-runtime-hook'
-    ]
+    "apt": ["docker.io"],
+    "upstream": ["docker-ce"],
+    "nvidia": [
+        "docker-ce",
+        "nvidia-docker2",
+        "nvidia-container-runtime",
+        "nvidia-container-runtime-hook",
+    ],
 }
 
 
@@ -25,8 +25,7 @@ def arch():
 
     :return: String
     """
-    return check_output(['dpkg', '--print-architecture']) \
-        .rstrip().decode('utf-8')
+    return check_output(["dpkg", "--print-architecture"]).rstrip().decode("utf-8")
 
 
 def determine_apt_source():
@@ -35,21 +34,19 @@ def determine_apt_source():
     """
     config = hookenv.config
 
-    docker_runtime = config('docker_runtime')
+    docker_runtime = config("docker_runtime")
 
-    if config('install_from_upstream'):
-        docker_runtime = 'upstream'
+    if config("install_from_upstream"):
+        docker_runtime = "upstream"
 
-    if docker_runtime == 'auto':
-        out = check_output(['lspci', '-nnk']).rstrip()
-        if arch() == 'amd64' \
-                and out.decode('utf-8').lower().count('nvidia') > 0:
-            docker_runtime = 'nvidia'
+    if docker_runtime == "auto":
+        out = check_output(["lspci", "-nnk"]).rstrip()
+        if arch() == "amd64" and out.decode("utf-8").lower().count("nvidia") > 0:
+            docker_runtime = "nvidia"
         else:
-            docker_runtime = 'apt'
+            docker_runtime = "apt"
 
-    hookenv.log(
-        'Setting runtime to {}'.format(docker_packages))
+    hookenv.log("Setting runtime to {}".format(docker_runtime))
 
     return docker_runtime
 
@@ -66,7 +63,7 @@ def render_configuration_template(service=False):
     parsed_hosts = ""
     if environment_config is not None:
         hosts = []
-        for address in environment_config.get('NO_PROXY', "").split(","):
+        for address in environment_config.get("NO_PROXY", "").split(","):
             address = address.strip()
             try:
                 net = ipaddress.ip_network(address)
@@ -78,32 +75,25 @@
             except ValueError:
                 hosts.append(address)
         parsed_hosts = ",".join(hosts)
-        environment_config.update({
-            'NO_PROXY': parsed_hosts,
-            'no_proxy': parsed_hosts
-        })
-    for key in ['http_proxy', 'https_proxy', 'no_proxy']:
+        environment_config.update({"NO_PROXY": parsed_hosts, "no_proxy": parsed_hosts})
+    for key in ["http_proxy", "https_proxy", "no_proxy"]:
         if not modified_config.get(key):
             modified_config[key] = environment_config.get(key)
 
     runtime = determine_apt_source()
 
     render(
-        'docker.defaults',
-        '/etc/default/docker',
+        "docker.defaults",
+        
"/etc/default/docker", { - 'opts': opts.to_s(), - 'manual': config('docker-opts'), - 'docker_runtime': runtime - } + "opts": opts.to_s(), + "manual": config("docker-opts"), + "docker_runtime": runtime, + }, ) if service: - render( - 'docker.systemd', - '/lib/systemd/system/docker.service', - modified_config - ) + render("docker.systemd", "/lib/systemd/system/docker.service", modified_config) write_daemon_json() @@ -119,13 +109,13 @@ def write_daemon_json(): daemon_opts = json.loads(daemon_opts) kv = unitdata.kv() - daemon_opts_additions = kv.get('daemon-opts-additions', default={}) + daemon_opts_additions = kv.get("daemon-opts-additions", default={}) # Merge the key/vals from charm config into those set by the charm. # If there are any shared keys, we want the value from charm config to win. daemon_opts_additions.update(daemon_opts) - with open('/etc/docker/daemon.json', 'w') as f: + with open("/etc/docker/daemon.json", "w") as f: json.dump(daemon_opts_additions, f) return daemon_opts_additions @@ -160,9 +150,9 @@ def set_daemon_json(key, value): return False kv = unitdata.kv() - daemon_opts_additions = kv.get('daemon-opts-additions', default={}) + daemon_opts_additions = kv.get("daemon-opts-additions", default={}) daemon_opts_additions[key] = value - kv.set('daemon-opts-additions', daemon_opts_additions) + kv.set("daemon-opts-additions", daemon_opts_additions) kv.flush() return write_daemon_json() @@ -178,13 +168,13 @@ def delete_daemon_json(key): """ kv = unitdata.kv() - daemon_opts_additions = kv.get('daemon-opts-additions', default={}) + daemon_opts_additions = kv.get("daemon-opts-additions", default={}) if key not in daemon_opts_additions: return False daemon_opts_additions.pop(key) - kv.set('daemon-opts-additions', daemon_opts_additions) + kv.set("daemon-opts-additions", daemon_opts_additions) kv.flush() write_daemon_json() diff --git a/reactive/docker.py b/reactive/docker.py index 9203494..ac57e34 100644 --- a/reactive/docker.py +++ b/reactive/docker.py @@ -1,10 +1,11 @@ +import contextlib import os -import requests from shlex import split from subprocess import check_call from subprocess import check_output from subprocess import CalledProcessError from subprocess import Popen, PIPE +from urllib.request import urlopen from charmhelpers.core import host from charmhelpers.core import hookenv @@ -37,6 +38,8 @@ from charms import layer +NRPE_PRIMARY = "nrpe-external-master" # wokeignore:rule=master + # 2 Major events are emitted from this layer. # # `docker.ready` is an event intended to signal other layers that need to @@ -72,14 +75,13 @@ def unhold_all(): apt_unhold(docker_packages[k]) -@hook('upgrade-charm') +@hook("upgrade-charm") def upgrade(): """ :return: None """ hold_all() - hookenv.log( - 'Holding docker packages at current revision.') + hookenv.log("Holding docker packages at current revision.") def set_custom_docker_package(): @@ -90,14 +92,16 @@ def set_custom_docker_package(): :return: None """ runtime = determine_apt_source() - if runtime == 'custom': + if runtime == "custom": hookenv.log( - 'Adding custom package {} to environment'.format( - config('docker_runtime_package'))) - docker_packages['custom'] = [config('docker_runtime_package')] + "Adding custom package {} to environment".format( + config("docker_runtime_package") + ) + ) + docker_packages["custom"] = [config("docker_runtime_package")] -@when_not('docker.ready') +@when_not("docker.ready") def install(): """ Install the docker daemon, and supporting tooling. 
@@ -111,42 +115,42 @@ def install(): # incur the overhead of installing docker. This tuneable layer option # allows you to disable the exec of that install routine, and instead short # circuit immediately to docker.available, so you can charm away! - layer_opts = layer.options('docker') - if layer_opts['skip-install']: - set_state('docker.available') - set_state('docker.ready') + layer_opts = layer.options("docker") + if layer_opts["skip-install"]: + set_state("docker.available") + set_state("docker.ready") return - status_set('maintenance', 'Installing AUFS and other tools.') - kernel_release = check_output(['uname', '-r']).rstrip() + status_set("maintenance", "Installing AUFS and other tools.") + kernel_release = check_output(["uname", "-r"]).rstrip() packages = [ - 'aufs-tools', - 'git', - 'linux-image-extra-{}'.format(kernel_release.decode('utf-8')), + "aufs-tools", + "git", + "linux-image-extra-{}".format(kernel_release.decode("utf-8")), ] apt_update() apt_install(packages) # Install docker-engine from apt. runtime = determine_apt_source() - remove_state('nvidia-docker.supported') - remove_state('nvidia-docker.installed') - if runtime == 'nvidia': - set_state('nvidia-docker.supported') + remove_state("nvidia-docker.supported") + remove_state("nvidia-docker.installed") + if runtime == "nvidia": + set_state("nvidia-docker.supported") install_from_nvidia_apt() - set_state('nvidia-docker.installed') - docker.set_daemon_json('default-runtime', 'nvidia') + set_state("nvidia-docker.installed") + docker.set_daemon_json("default-runtime", "nvidia") else: - docker.delete_daemon_json('default-runtime') - if runtime == 'upstream': + docker.delete_daemon_json("default-runtime") + if runtime == "upstream": install_from_upstream_apt() - elif runtime == 'apt': + elif runtime == "apt": install_from_archive_apt() - elif runtime == 'custom': + elif runtime == "custom": if not install_from_custom_apt(): return False # If install fails, stop. 
else:
-        hookenv.log('Unknown runtime {}'.format(runtime))
+        hookenv.log("Unknown runtime {}".format(runtime))
         return False
 
     validate_config()
@@ -154,18 +158,17 @@
     reload_system_daemons()
     hold_all()
-    hookenv.log(
-        'Holding docker-engine and docker.io packages at current revision.')
+    hookenv.log("Holding docker-engine and docker.io packages at current revision.")
 
-    host.service_restart('docker')
+    host.service_restart("docker")
     hookenv.log('Docker installed, setting "docker.ready" state.')
-    set_state('docker.ready')
+    set_state("docker.ready")
 
     # Make with the adding of the users to the groups
-    check_call(['usermod', '-aG', 'docker', 'ubuntu'])
+    check_call(["usermod", "-aG", "docker", "ubuntu"])
 
 
-@when('config.changed.install_from_upstream', 'docker.ready')
+@when("config.changed.install_from_upstream", "docker.ready")
 def toggle_install_from_upstream():
     """
     :return: None
@@ -173,7 +176,7 @@
     toggle_docker_daemon_source()
 
 
-@when('config.changed.apt-key-server', 'docker.ready')
+@when("config.changed.apt-key-server", "docker.ready")
 def toggle_install_with_new_keyserver():
     """
     :return: None
@@ -181,7 +184,7 @@
     toggle_docker_daemon_source()
 
 
-@when('config.changed.docker_runtime', 'docker.ready')
+@when("config.changed.docker_runtime", "docker.ready")
 def toggle_docker_daemon_source():
     """
     A disruptive reaction to config changing that will remove the existing
@@ -204,20 +207,20 @@
     if len(installed) == 0:
         # We have not reached installation phase, return until
         # we can reasonably re-test this scenario.
-        hookenv.log('No supported docker runtime is installed. Noop.')
+        hookenv.log("No supported docker runtime is installed. Noop.")
         return
 
     runtime = determine_apt_source()
     if not docker_packages.get(runtime):
-        hookenv.log('Unknown runtime {}'.format(runtime))
+        hookenv.log("Unknown runtime {}".format(runtime))
         return False
 
-    hookenv.log('Runtime to install {}'.format(runtime))
+    hookenv.log("Runtime to install {}".format(runtime))
 
     # Workaround
     # https://bugs.launchpad.net/ubuntu/+source/docker.io/+bug/1724353.
-    if os.path.exists('/var/lib/docker/nuke-graph-directory.sh'):
-        hookenv.log('Workaround bug 1724353')
+    if os.path.exists("/var/lib/docker/nuke-graph-directory.sh"):
+        hookenv.log("Workaround bug 1724353")
         cmd = "sed -i '1i#!/bin/bash' /var/lib/docker/nuke-graph-directory.sh"
         check_call(split(cmd))
 
     # so we need to uninstall either of the others that are installed
     # and reset the state to force a reinstall.
     if runtime not in installed:
-        host.service_stop('docker')
+        host.service_stop("docker")
         for k in docker_packages.keys():
             package_list = " ".join(docker_packages[k])
-            hookenv.log('Removing package(s): {}.'.format(package_list))
+            hookenv.log("Removing package(s): {}.".format(package_list))
             apt_unhold(docker_packages[k])
             apt_purge(docker_packages[k])
-        remove_state('docker.ready')
-        remove_state('docker.available')
+        remove_state("docker.ready")
+        remove_state("docker.available")
     else:
-        hookenv.log('Not touching packages.')
+        hookenv.log("Not touching packages.")
 
 
-@when_any('config.changed.http_proxy',
-          'config.changed.https_proxy',
-          'config.changed.no_proxy',
-          'config.changed.daemon-opts')
-@when('docker.ready')
+@when_any(
+    "config.changed.http_proxy",
+    "config.changed.https_proxy",
+    "config.changed.no_proxy",
+    "config.changed.daemon-opts",
+)
+@when("docker.ready")
 def proxy_or_daemon_opts_changed():
     """
     The proxy or daemon configuration has changed, render templates and
@@ -258,8 +263,8 @@ def install_from_archive_apt():
 
     :return: None
     """
-    status_set('maintenance', 'Installing docker.io from universe.')
-    apt_install(['docker.io'], fatal=True)
+    status_set("maintenance", "Installing docker.io from universe.")
+    apt_install(["docker.io"], fatal=True)
 
 
 def install_from_upstream_apt():
@@ -269,12 +274,12 @@
 
     :return: None
     """
-    status_set('maintenance', 'Installing docker-ce from upstream PPA.')
-    key_url = 'https://download.docker.com/linux/ubuntu/gpg'
+    status_set("maintenance", "Installing docker-ce from upstream PPA.")
+    key_url = "https://download.docker.com/linux/ubuntu/gpg"
     add_apt_key_url(key_url)
 
     # The url to the server that contains the docker apt packages.
-    apt_url = 'https://download.docker.com/linux/ubuntu'
+    apt_url = "https://download.docker.com/linux/ubuntu"
 
     # Get the package architecture (amd64), not the machine hardware (x86_64)
     architecture = arch()
@@ -283,28 +288,20 @@
     lsb = host.lsb_release()
 
     # The codename for the release.
-    code = lsb['DISTRIB_CODENAME']
+    code = lsb["DISTRIB_CODENAME"]
 
     # Repo can be: stable, edge or test.
-    repo = 'stable'
+    repo = "stable"
 
     # E.g.
     # deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable
     debs = list()
-    debs.append('deb [arch={}] {} {} {}'.format(
-        architecture,
-        apt_url,
-        code,
-        repo
-    ))
+    debs.append("deb [arch={}] {} {} {}".format(architecture, apt_url, code, repo))
 
     write_docker_sources(debs)
 
     apt_update(fatal=True)
 
     # Install Docker via apt.
-    apt_install(
-        docker_packages['upstream'],
-        fatal=True
-    )
+    apt_install(docker_packages["upstream"], fatal=True)
 
 
 def install_from_nvidia_apt():
@@ -313,49 +310,37 @@
 
     :return: None
     """
-    status_set('maintenance', 'Installing docker-engine from Nvidia PPA.')
+    status_set("maintenance", "Installing docker-engine from Nvidia PPA.")
 
     # Get the server and key in the apt-key management tool.
-    add_apt_key('9DC858229FC7DD38854AE2D88D81803C0EBFCD88')
+    add_apt_key("9DC858229FC7DD38854AE2D88D81803C0EBFCD88")
 
     # Install key for nvidia-docker. This key changes frequently
     # ([expires: 2019-09-20]) so we should do what the official docs say and
     # not try to get it through its fingerprint.
-    add_apt_key_url('https://nvidia.github.io/nvidia-container-runtime/gpgkey')
+    add_apt_key_url("https://nvidia.github.io/nvidia-container-runtime/gpgkey")
 
     # Get the package architecture (amd64), not the machine hardware (x86_64)
     architecture = arch()
 
     # Get the lsb information as a dictionary.
     
lsb = host.lsb_release() - code = lsb['DISTRIB_CODENAME'] - release = lsb['DISTRIB_RELEASE'] - ubuntu = str(lsb['DISTRIB_ID']).lower() - docker_url = 'https://download.docker.com/linux/ubuntu' - nvidia_url = 'https://nvidia.github.io' - repo = 'stable' + code = lsb["DISTRIB_CODENAME"] + release = lsb["DISTRIB_RELEASE"] + ubuntu = str(lsb["DISTRIB_ID"]).lower() + docker_url = "https://download.docker.com/linux/ubuntu" + nvidia_url = "https://nvidia.github.io" + repo = "stable" debs = list() - debs.append('deb [arch={}] {} {} {}'.format( - architecture, - docker_url, - code, - repo - )) + debs.append("deb [arch={}] {} {} {}".format(architecture, docker_url, code, repo)) - packages = [ - 'libnvidia-container', - 'nvidia-container-runtime', - 'nvidia-docker' - ] + packages = ["libnvidia-container", "nvidia-container-runtime", "nvidia-docker"] for package in packages: - debs.append('deb {}/{}/ubuntu{}/{} /'.format( - nvidia_url, - package, - release, - architecture - )) + debs.append( + "deb {}/{}/ubuntu{}/{} /".format(nvidia_url, package, release, architecture) + ) write_docker_sources(debs) @@ -364,11 +349,12 @@ def install_from_nvidia_apt(): apt_update(fatal=True) # Actually install the required packages docker-ce nvidia-docker2. - docker_ce = hookenv.config('docker-ce-package') - nvidia_docker2 = hookenv.config('nvidia-docker-package') - nv_container_runtime = hookenv.config('nvidia-container-runtime-package') - apt_install(['cuda-drivers', docker_ce, nvidia_docker2, - nv_container_runtime], fatal=True) + docker_ce = hookenv.config("docker-ce-package") + nvidia_docker2 = hookenv.config("nvidia-docker-package") + nv_container_runtime = hookenv.config("nvidia-container-runtime-package") + apt_install( + ["cuda-drivers", docker_ce, nvidia_docker2, nv_container_runtime], fatal=True + ) def install_from_custom_apt(): @@ -377,36 +363,33 @@ def install_from_custom_apt(): :return: None or False """ - status_set('maintenance', 'Installing Docker from custom repository.') + status_set("maintenance", "Installing Docker from custom repository.") - repo_string = config('docker_runtime_repo') - key_url = config('docker_runtime_key_url') - package_name = config('docker_runtime_package') + repo_string = config("docker_runtime_repo") + key_url = config("docker_runtime_key_url") + package_name = config("docker_runtime_package") if not repo_string: - message = '`docker_runtime_repo` must be set' + message = "`docker_runtime_repo` must be set" hookenv.log(message) - hookenv.status_set('blocked', message) + hookenv.status_set("blocked", message) return False if not key_url: - message = '`docker_runtime_key_url` must be set' + message = "`docker_runtime_key_url` must be set" hookenv.log(message) - hookenv.status_set('blocked', message) + hookenv.status_set("blocked", message) return False if not package_name: - message = '`docker_runtime_package` must be set' + message = "`docker_runtime_package` must be set" hookenv.log(message) - hookenv.status_set('blocked', message) + hookenv.status_set("blocked", message) return False lsb = host.lsb_release() - format_dictionary = { - 'ARCH': arch(), - 'CODE': lsb['DISTRIB_CODENAME'] - } + format_dictionary = {"ARCH": arch(), "CODE": lsb["DISTRIB_CODENAME"]} add_apt_key_url(key_url) write_docker_sources([repo_string.format(**format_dictionary)]) @@ -425,42 +408,30 @@ def install_cuda_drivers_repo(architecture, release, ubuntu): :param ubuntu: String :return: None """ - key_file = '7fa2af80.pub' + key_file = "7fa2af80.pub" # Distribution should be something like 
'ubuntu1604'. - distribution = '{}{}'.format( - ubuntu, - str(release).replace('.', '') - ) - repository_path = \ - 'developer.download.nvidia.com/compute/cuda/repos/{}/x86_64'.format( + distribution = "{}{}".format(ubuntu, str(release).replace(".", "")) + repository_path = ( + "developer.download.nvidia.com/compute/cuda/repos/{}/x86_64".format( distribution ) - command = 'apt-key adv --fetch-keys http://{}/{}'.format( - repository_path, - key_file ) + command = "apt-key adv --fetch-keys http://{}/{}".format(repository_path, key_file) check_call(split(command)) - cuda_repository_version = config('cuda_repo') - cuda_repository_package = 'cuda-repo-{}_{}_{}.deb'.format( - distribution, - cuda_repository_version, - architecture - ) - repository_url = 'https://{}/{}'.format( - repository_path, - cuda_repository_package + cuda_repository_version = config("cuda_repo") + cuda_repository_package = "cuda-repo-{}_{}_{}.deb".format( + distribution, cuda_repository_version, architecture ) + repository_url = "https://{}/{}".format(repository_path, cuda_repository_package) - r = requests.get(repository_url) - r.raise_for_status() - with open(cuda_repository_package, 'wb') as f: - for chunk in r.iter_content(chunk_size=1024): - f.write(chunk) - r.close() + with contextlib.closing(urlopen(repository_url)) as r: + with open(cuda_repository_package, "wb") as f: + while chunk := r.read(1024 * 1024): # 1M chunks + f.write(chunk) - command = 'dpkg -i {}'.format(cuda_repository_package) + command = "dpkg -i {}".format(cuda_repository_package) check_call(split(command)) @@ -472,12 +443,12 @@ def write_docker_sources(debs): :return: None """ # Run mkdir -p /etc/apt/sources.list.d. - if not os.path.isdir('/etc/apt/sources.list.d'): - os.makedirs('/etc/apt/sources.list.d') + if not os.path.isdir("/etc/apt/sources.list.d"): + os.makedirs("/etc/apt/sources.list.d") # Write the docker source file to the apt sources.list.d directory. 
- with open('/etc/apt/sources.list.d/docker.list', 'w+') as stream: - stream.write('\n'.join(debs)) + with open("/etc/apt/sources.list.d/docker.list", "w+") as stream: + stream.write("\n".join(debs)) def add_apt_key_url(url): @@ -487,9 +458,9 @@ def add_apt_key_url(url): :param url: String :return: None """ - curl_command = 'curl -s -L {}'.format(url).split() + curl_command = "curl -s -L {}".format(url).split() curl = Popen(curl_command, stdout=PIPE) - apt_command = 'apt-key add -'.split() + apt_command = "apt-key add -".split() check_call(apt_command, stdin=curl.stdout) curl.wait() @@ -501,33 +472,33 @@ def add_apt_key(key): :param key: String :return: None """ - keyserver = config('apt-key-server') - http_proxy = config('http_proxy') - cmd = 'apt-key adv --keyserver {0}'.format(keyserver) + keyserver = config("apt-key-server") + http_proxy = config("http_proxy") + cmd = "apt-key adv --keyserver {0}".format(keyserver) if http_proxy: - cmd = '{0} --keyserver-options http-proxy={1}'.format(cmd, http_proxy) - cmd = '{0} --recv-keys {1}'.format(cmd, key) + cmd = "{0} --keyserver-options http-proxy={1}".format(cmd, http_proxy) + cmd = "{0} --recv-keys {1}".format(cmd, key) # "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 # --recv-keys 58118E89F3A912897C070ADBF76221572C52609D" check_call(split(cmd)) -@when('docker.ready') -@when_not('cgroups.modified') +@when("docker.ready") +@when_not("cgroups.modified") def enable_grub_cgroups(): """ :return: None """ cfg = config() - if cfg.get('enable-cgroups'): - hookenv.log('Calling enable_grub_cgroups.sh and rebooting machine.') - check_call(['scripts/enable_grub_cgroups.sh']) - set_state('cgroups.modified') + if cfg.get("enable-cgroups"): + hookenv.log("Calling enable_grub_cgroups.sh and rebooting machine.") + check_call(["scripts/enable_grub_cgroups.sh"]) + set_state("cgroups.modified") -@when('docker.ready') -@when_not('docker.available') +@when("docker.ready") +@when_not("docker.available") def signal_workloads_start(): """ Signal to higher layers the container runtime is ready to run @@ -540,14 +511,14 @@ def signal_workloads_start(): # it is available for workloads. Assuming response from daemon # to be sufficient. if not _probe_runtime_availability(): - status_set('waiting', 'Container runtime not available.') + status_set("waiting", "Container runtime not available.") return - status_set('active', 'Container runtime available.') - set_state('docker.available') + status_set("active", "Container runtime available.") + set_state("docker.available") -@when('sdn-plugin.available', 'docker.available') +@when("sdn-plugin.available", "docker.available") def container_sdn_setup(sdn): """ Receive the information from the SDN plugin, and render the docker @@ -557,25 +528,25 @@ def container_sdn_setup(sdn): :return: None """ sdn_config = sdn.get_sdn_config() - bind_ip = sdn_config['subnet'] - mtu = sdn_config['mtu'] - if data_changed('bip', bind_ip) or data_changed('mtu', mtu): - status_set('maintenance', 'Configuring container runtime with SDN.') + bind_ip = sdn_config["subnet"] + mtu = sdn_config["mtu"] + if data_changed("bip", bind_ip) or data_changed("mtu", mtu): + status_set("maintenance", "Configuring container runtime with SDN.") opts = DockerOpts() # This is a great way to misconfigure a docker daemon. 
Remove the
         # existing bind ip and mtu values of the SDN
-        if opts.exists('bip'):
-            opts.pop('bip')
-        if opts.exists('mtu'):
-            opts.pop('mtu')
-        opts.add('bip', bind_ip)
-        opts.add('mtu', mtu)
+        if opts.exists("bip"):
+            opts.pop("bip")
+        if opts.exists("mtu"):
+            opts.pop("mtu")
+        opts.add("bip", bind_ip)
+        opts.add("mtu", mtu)
         _remove_docker_network_bridge()
-        set_state('docker.sdn.configured')
+        set_state("docker.sdn.configured")
 
 
-@when_not('sdn-plugin.available')
-@when('docker.sdn.configured')
+@when_not("sdn-plugin.available")
+@when("docker.sdn.configured")
 def scrub_sdn_config():
     """
     If this scenario of states is true, we have likely broken a
@@ -587,26 +558,26 @@
     """
     opts = DockerOpts()
     try:
-        opts.pop('bip')
+        opts.pop("bip")
     except KeyError:
-        hookenv.log('Unable to locate bip in Docker config.')
-        hookenv.log('Assuming no action required.')
+        hookenv.log("Unable to locate bip in Docker config.")
+        hookenv.log("Assuming no action required.")
 
     try:
-        opts.pop('mtu')
+        opts.pop("mtu")
     except KeyError:
-        hookenv.log('Unable to locate mtu in Docker config.')
-        hookenv.log('Assuming no action required.')
+        hookenv.log("Unable to locate mtu in Docker config.")
+        hookenv.log("Assuming no action required.")
 
     # This method does everything we need to ensure the bridge configuration
     # has been removed. restarting the daemon restores docker with its default
     # networking mode.
     _remove_docker_network_bridge()
     recycle_daemon()
-    remove_state('docker.sdn.configured')
+    remove_state("docker.sdn.configured")
 
 
-@when('docker.restart')
+@when("docker.restart")
 def docker_restart():
     """
     Other layers should be able to trigger a daemon restart. Invoke the
@@ -615,10 +586,10 @@
     :return: None
     """
     recycle_daemon()
-    remove_state('docker.restart')
+    remove_state("docker.restart")
 
 
-@when('config.changed.docker-opts', 'docker.ready')
+@when("config.changed.docker-opts", "docker.ready")
 def docker_template_update():
     """
     The user has passed configuration that directly affects our running
@@ -630,8 +601,8 @@
     recycle_daemon()
 
 
-@when('docker.ready', 'dockerhost.connected')
-@when_not('dockerhost.configured')
+@when("docker.ready", "dockerhost.connected")
+@when_not("dockerhost.configured")
 def dockerhost_connected(dockerhost):
     """
     Transmits the docker url to any subordinates requiring it.
@@ -641,26 +612,25 @@
     dockerhost.configure(Docker().socket)
 
 
-@when('nrpe-external-master.available')
-@when_not('nrpe-external-master.docker.initial-config')
+@when(f"{NRPE_PRIMARY}.available")
+@when_not(f"{NRPE_PRIMARY}.docker.initial-config")
 def initial_nrpe_config():
     """
     :return: None
    """
-    set_state('nrpe-external-master.docker.initial-config')
+    set_state(f"{NRPE_PRIMARY}.docker.initial-config")
    update_nrpe_config()
 
 
-@when('docker.ready')
-@when('nrpe-external-master.available')
-@when_any('config.changed.nagios_context',
-          'config.changed.nagios_servicegroups')
+@when("docker.ready")
+@when(f"{NRPE_PRIMARY}.available")
+@when_any("config.changed.nagios_context", "config.changed.nagios_servicegroups")
 def update_nrpe_config():
     """
     :return: None
     """
     # List of systemd services that will be checked.
-    services = ['docker']
+    services = ["docker"]
 
     # The current nrpe-external-master interface doesn't handle a lot of logic,
     # use the charm-helpers code for now.
@@ -671,16 +641,16 @@
     nrpe_setup.write()
 
 
-@when_not('nrpe-external-master.available')
-@when('nrpe-external-master.docker.initial-config')
+@when_not(f"{NRPE_PRIMARY}.available")
+@when(f"{NRPE_PRIMARY}.docker.initial-config")
 def remove_nrpe_config():
     """
     :return: None
     """
-    remove_state('nrpe-external-master.docker.initial-config')
+    remove_state(f"{NRPE_PRIMARY}.docker.initial-config")
 
     # List of systemd services for which the checks will be removed.
-    services = ['docker']
+    services = ["docker"]
 
     # The current nrpe-external-master interface doesn't handle a lot of logic,
     # use the charm-helpers code for now.
@@ -704,8 +674,8 @@ def validate_config():
     max_line = 2048
     line_prefix_len = len('Environment="NO_PROXY=""')
     remain_len = max_line - line_prefix_len
-    if len(config('no_proxy')) > remain_len:
-        raise ConfigError('no_proxy longer than {} chars.'.format(remain_len))
+    if len(config("no_proxy")) > remain_len:
+        raise ConfigError("no_proxy longer than {} chars.".format(remain_len))
 
 
 def recycle_daemon():
@@ -716,16 +686,16 @@
 
     :return: None
     """
     validate_config()
-    hookenv.log('Restarting docker service.')
+    hookenv.log("Restarting docker service.")
 
     # Re-render our docker daemon template at this time... because we're
     # restarting. And it's nice to play nice with others. Isn't that nice?
     render_configuration_template(service=True)
 
     reload_system_daemons()
-    host.service_restart('docker')
+    host.service_restart("docker")
 
     if not _probe_runtime_availability():
-        status_set('waiting', 'Container runtime not available.')
+        status_set("waiting", "Container runtime not available.")
         return
 
 
 def reload_system_daemons():
@@ -735,14 +705,14 @@
 
     :return: None
     """
-    hookenv.log('Reloading system daemons.')
+    hookenv.log("Reloading system daemons.")
     lsb = host.lsb_release()
-    code = lsb['DISTRIB_CODENAME']
-    if code != 'trusty':
-        command = ['systemctl', 'daemon-reload']
+    code = lsb["DISTRIB_CODENAME"]
+    if code != "trusty":
+        command = ["systemctl", "daemon-reload"]
         check_call(command)
     else:
-        host.service_reload('docker')
+        host.service_reload("docker")
 
 
 def _probe_runtime_availability():
@@ -752,12 +722,12 @@
 
     :return: Boolean
     """
     try:
-        command = ['docker', 'info']
+        command = ["docker", "info"]
         check_call(command)
         return True
     except CalledProcessError:
         # Remove the availability state if we fail reachability.
-        remove_state('docker.available')
+        remove_state("docker.available")
         return False
 
 
 def _remove_docker_network_bridge():
@@ -769,16 +739,15 @@
 
     :return: None
     """
-    status_set('maintenance',
-               'Reconfiguring container runtime network bridge.')
-    host.service_stop('docker')
-    apt_install(['bridge-utils'], fatal=True)
+    status_set("maintenance", "Reconfiguring container runtime network bridge.")
+    host.service_stop("docker")
+    apt_install(["bridge-utils"], fatal=True)
 
     # cmd = "ifconfig docker0 down"
     # ifconfig doesn't always work. use native linux networking commands to
     # mark the bridge as inactive.
-    check_call(['ip', 'link', 'set', 'docker0', 'down'])
-    check_call(['brctl', 'delbr', 'docker0'])
+    check_call(["ip", "link", "set", "docker0", "down"])
+    check_call(["brctl", "delbr", "docker0"])
 
     # Render the config and restart docker.
     
recycle_daemon() diff --git a/tests/conftest.py b/tests/conftest.py index 01b53bc..d123b40 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,5 +2,5 @@ from unittest.mock import MagicMock # mock dependencies which we don't care about covering in our tests -sys.modules['charms.docker'] = MagicMock() -sys.modules['charms.reactive'] = MagicMock() +sys.modules["charms.docker"] = MagicMock() +sys.modules["charms.reactive"] = MagicMock() diff --git a/tests/test_lib_docker.py b/tests/test_lib_docker.py index 5dc7d61..0939c9d 100644 --- a/tests/test_lib_docker.py +++ b/tests/test_lib_docker.py @@ -8,15 +8,12 @@ from charms.layer.docker import delete_daemon_json -@patch('charmhelpers.core.unitdata.kv') -@patch('charmhelpers.core.hookenv.config') +@patch("charmhelpers.core.unitdata.kv") +@patch("charmhelpers.core.hookenv.config") def test_write_daemon_json(config, kv): daemon_opts = { "log-driver": "json-file", - "log-opts": { - "max-size": "10m", - "max-file": "100" - } + "log-opts": {"max-size": "10m", "max-file": "100"}, } daemon_opts_additions = { @@ -24,60 +21,53 @@ def test_write_daemon_json(config, kv): "my-extra-config": "this-will-be-preserved", } - charm_config = { - "daemon-opts": json.dumps(daemon_opts) - } + charm_config = {"daemon-opts": json.dumps(daemon_opts)} def mock_config(key): return charm_config[key] + config.side_effect = mock_config kv.return_value.get.return_value = daemon_opts_additions - with patch('builtins.open', mock_open(), create=True): + with patch("builtins.open", mock_open(), create=True): daemon_opts_additions.update(daemon_opts) result = write_daemon_json() assert result == daemon_opts_additions -@patch('charmhelpers.core.hookenv.config') +@patch("charmhelpers.core.hookenv.config") def test_set_daemon_json(config): daemon_opts = { "log-driver": "json-file", - "log-opts": { - "max-size": "10m", - "max-file": "100" - } + "log-opts": {"max-size": "10m", "max-file": "100"}, } - charm_config = { - "daemon-opts": json.dumps(daemon_opts) - } + charm_config = {"daemon-opts": json.dumps(daemon_opts)} def mock_config(key): return charm_config[key] + config.side_effect = mock_config # Test that charm can't override a config value assert set_daemon_json("log-driver", "new value") is False - with patch('builtins.open', mock_open(), create=True): + with patch("builtins.open", mock_open(), create=True): result = set_daemon_json("log-driver", "json-file") assert result["log-driver"] == "json-file" result = set_daemon_json("my-extra-config", "value") assert result["my-extra-config"] == "value" -@patch('charmhelpers.core.unitdata.kv') +@patch("charmhelpers.core.unitdata.kv") def test_delete_daemon_json(kv): - daemon_opts_additions = { - "foo": "bar" - } + daemon_opts_additions = {"foo": "bar"} - with patch('charms.layer.docker.write_daemon_json'): + with patch("charms.layer.docker.write_daemon_json"): kv.return_value.get.return_value = {} assert delete_daemon_json("foo") is False kv.reset_mock() kv.return_value.get.return_value = daemon_opts_additions assert delete_daemon_json("foo") is True - kv().set.assert_called_once_with('daemon-opts-additions', {}) + kv().set.assert_called_once_with("daemon-opts-additions", {}) diff --git a/tox.ini b/tox.ini index 6e914d5..a267c67 100644 --- a/tox.ini +++ b/tox.ini @@ -10,11 +10,23 @@ setenv = [testenv:unit] deps = pytest - flake8 charmhelpers commands = pytest --tb native -s {posargs} +[testenv:format] +deps = + ruff + black +commands = + ruff --fix {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests + black 
{toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests + + [testenv:lint] -deps = - flake8 -commands = flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests +deps = + ruff + black +commands = + ruff {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests + black --check {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests + diff --git a/wheelhouse.txt b/wheelhouse.txt index 7855d12..0084b85 100644 --- a/wheelhouse.txt +++ b/wheelhouse.txt @@ -1,6 +1,5 @@ charms.docker>=0.0.1,<=2.0.0 vcversioner>=2.0.0,<=3.0.0 -requests>=2.0.0,<3.0.0 # idna>=2.28.1 and beyond (needed by requests) # requires flit-core for building its wheel.
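
Reviewer sketch (not part of the patch): the requests-to-stdlib swap in
install_cuda_drivers_repo() reduces to the standalone pattern below. The
fetch() helper, its parameters, and the example URL are illustrative
stand-ins only; the charm inlines this logic rather than defining a helper.
The := operator requires Python 3.8+, which the patched code already assumes.

# A minimal sketch, assuming only the standard library: stream an HTTP
# response to disk in 1 MiB chunks, replacing requests.get() + iter_content().
from urllib.request import urlopen


def fetch(url, dest, chunk_size=1024 * 1024):
    # urlopen() raises urllib.error.HTTPError for 4xx/5xx responses on its
    # own, so nothing equivalent to requests' raise_for_status() is needed.
    # HTTPResponse is already a context manager on Python 3; the charm wraps
    # it in contextlib.closing() as an extra compatibility measure.
    with urlopen(url) as response, open(dest, "wb") as f:
        while chunk := response.read(chunk_size):  # read() returns b"" at EOF
            f.write(chunk)

Calling, say, fetch("https://example.com/some.deb", "some.deb") mirrors what
the charm does with the cuda repository package before handing it to dpkg -i,
and is why requests (and its transitive pins) could be dropped from
wheelhouse.txt.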
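A second sketch, covering the daemon.json merge semantics the reformatted
tests exercise: keys the charm stored via set_daemon_json() are preserved,
but any key also present in the operator's daemon-opts charm config wins.
The two values below are hypothetical stand-ins for hookenv config and the
kv-stored "daemon-opts-additions" bucket in lib/charms/layer/docker.py.

import json

# Stand-ins: operator config ("daemon-opts") and charm-set additions.
config_daemon_opts = '{"log-driver": "json-file"}'
charm_set_additions = {"log-driver": "journald", "my-extra-config": "kept"}


def merged_daemon_json():
    # Start from the charm-set keys, then overlay charm config so any shared
    # key takes the operator-configured value, as write_daemon_json() does
    # before writing /etc/docker/daemon.json.
    merged = dict(charm_set_additions)
    merged.update(json.loads(config_daemon_opts))
    return merged


assert merged_daemon_json()["log-driver"] == "json-file"
assert merged_daemon_json()["my-extra-config"] == "kept"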