diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 834994a4..70d910b1 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -49,9 +49,7 @@ jobs: build: name: Build charms - uses: canonical/data-platform-workflows/.github/workflows/build_charms_with_cache.yaml@v6.1.1 - with: - charmcraft-snap-revision: 1349 + uses: canonical/data-platform-workflows/.github/workflows/build_charms_with_cache.yaml@v7 integration-test: strategy: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d21e2650..a14ca065 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -35,7 +35,7 @@ jobs: build: name: Build charm - uses: canonical/data-platform-workflows/.github/workflows/build_charm_without_cache.yaml@v6.1.1 + uses: canonical/data-platform-workflows/.github/workflows/build_charm_without_cache.yaml@v7 release: name: Release to Charmhub @@ -43,7 +43,7 @@ jobs: - lib-check - ci-tests - build - uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v6.1.1 + uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v7 with: channel: 3/edge artifact-name: ${{ needs.build.outputs.artifact-name }} diff --git a/.github/workflows/sync_issue_to_jira.yaml b/.github/workflows/sync_issue_to_jira.yaml index e0c09c79..d3ada523 100644 --- a/.github/workflows/sync_issue_to_jira.yaml +++ b/.github/workflows/sync_issue_to_jira.yaml @@ -9,7 +9,7 @@ on: jobs: sync: name: Sync GitHub issue to Jira - uses: canonical/data-platform-workflows/.github/workflows/sync_issue_to_jira.yaml@v6.1.1 + uses: canonical/data-platform-workflows/.github/workflows/sync_issue_to_jira.yaml@v7 with: jira-base-url: https://warthogs.atlassian.net jira-project-key: DPE diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index e9152e1d..714eace4 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ 
b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Library to manage the relation for the data-platform products. +r"""Library to manage the relation for the data-platform products. This library contains the Requires and Provides classes for handling the relation between an application and multiple managed application supported by the data-team: @@ -144,6 +144,19 @@ def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: ``` +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + ### Provider Charm Following an example of using the DatabaseRequestedEvent, in the context of the @@ -278,22 +291,26 @@ def _on_topic_requested(self, event: TopicRequestedEvent): exchanged in the relation databag. 
""" +import copy import json import logging from abc import ABC, abstractmethod from collections import namedtuple from datetime import datetime -from typing import List, Optional +from enum import Enum +from typing import Callable, Dict, List, Optional, Set, Tuple, Union +from ops import JujuVersion, Secret, SecretInfo, SecretNotFoundError from ops.charm import ( CharmBase, CharmEvents, RelationChangedEvent, + RelationCreatedEvent, RelationEvent, - RelationJoinedEvent, + SecretChangedEvent, ) from ops.framework import EventSource, Object -from ops.model import Relation +from ops.model import Application, ModelError, Relation, Unit # The unique Charmhub library identifier, never change it LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" @@ -303,7 +320,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 9 +LIBPATCH = 24 PYDEPS = ["ops>=2.0.0"] @@ -318,7 +335,79 @@ def _on_topic_requested(self, event: TopicRequestedEvent): deleted - key that were deleted""" -def diff(event: RelationChangedEvent, bucket: str) -> Diff: +PROV_SECRET_PREFIX = "secret-" +REQ_SECRET_FIELDS = "requested-secrets" + + +class SecretGroup(Enum): + """Secret groups as constants.""" + + USER = "user" + TLS = "tls" + EXTRA = "extra" + + +# Local map to associate mappings with secrets potentially as a group +SECRET_LABEL_MAP = { + "username": SecretGroup.USER, + "password": SecretGroup.USER, + "uris": SecretGroup.USER, + "tls": SecretGroup.TLS, + "tls-ca": SecretGroup.TLS, +} + + +class DataInterfacesError(Exception): + """Common ancestor for DataInterfaces related exceptions.""" + + +class SecretError(Exception): + """Common ancestor for Secrets related exceptions.""" + + +class SecretAlreadyExistsError(SecretError): + """A secret that was to be added already exists.""" + + +class SecretsUnavailableError(SecretError): + """Secrets aren't yet available for 
Juju version used.""" + + +class SecretsIllegalUpdateError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +def get_encoded_dict( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[Dict[str, str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "{}")) + if isinstance(data, dict): + return data + logger.error("Unexpected datatype for %s instead of dict.", str(data)) + + +def get_encoded_list( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[List[str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "[]")) + if isinstance(data, list): + return data + logger.error("Unexpected datatype for %s instead of list.", str(data)) + + +def set_encoded_field( + relation: Relation, + member: Union[Unit, Application], + field: str, + value: Union[str, list, Dict[str, str]], +) -> None: + """Set an encoded field from relation data.""" + relation.data[member].update({field: json.dumps(value)}) + + +def diff(event: RelationChangedEvent, bucket: Union[Unit, Application]) -> Diff: """Retrieves the diff of the data in the relation changed databag. Args: @@ -330,31 +419,173 @@ def diff(event: RelationChangedEvent, bucket: str) -> Diff: keys from the event relation databag. """ # Retrieve the old data from the data key in the application relation databag. - old_data = json.loads(event.relation.data[bucket].get("data", "{}")) + old_data = get_encoded_dict(event.relation, bucket, "data") + + if not old_data: + old_data = {} + # Retrieve the new data from the event relation databag. 
- new_data = { - key: value for key, value in event.relation.data[event.app].items() if key != "data" - } + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) # These are the keys that were added to the databag and triggered this event. - added = new_data.keys() - old_data.keys() + added = new_data.keys() - old_data.keys() # pyright: ignore [reportGeneralTypeIssues] # These are the keys that were removed from the databag and triggered this event. - deleted = old_data.keys() - new_data.keys() + deleted = old_data.keys() - new_data.keys() # pyright: ignore [reportGeneralTypeIssues] # These are the keys that already existed in the databag, # but had their values changed. - changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]} + changed = { + key + for key in old_data.keys() & new_data.keys() # pyright: ignore [reportGeneralTypeIssues] + if old_data[key] != new_data[key] # pyright: ignore [reportGeneralTypeIssues] + } # Convert the new_data to a serializable format and save it for a next diff check. - event.relation.data[bucket].update({"data": json.dumps(new_data)}) + set_encoded_field(event.relation, bucket, "data", new_data) # Return the diff with all possible changes. 
return Diff(added, changed, deleted) -# Base DataProvides and DataRequires +def leader_only(f): + """Decorator to ensure that only leader can perform given operation.""" + + def wrapper(self, *args, **kwargs): + if not self.local_unit.is_leader(): + logger.error( + "This operation (%s()) can only be performed by the leader unit", f.__name__ + ) + return + return f(self, *args, **kwargs) + return wrapper -class DataProvides(Object, ABC): - """Base provides-side of the data products relation.""" + +def juju_secrets_only(f): + """Decorator to ensure that certain operations would be only executed on Juju3.""" + + def wrapper(self, *args, **kwargs): + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + return f(self, *args, **kwargs) + + return wrapper + + +class Scope(Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +class CachedSecret: + """Locally cache a secret. + + The data structure is precisely re-using/simulating as in the actual Secret Storage + """ + + def __init__(self, charm: CharmBase, label: str, secret_uri: Optional[str] = None): + self._secret_meta = None + self._secret_content = {} + self._secret_uri = secret_uri + self.label = label + self.charm = charm + + def add_secret(self, content: Dict[str, str], relation: Relation) -> Secret: + """Create a new secret.""" + if self._secret_uri: + raise SecretAlreadyExistsError( + "Secret is already defined with uri %s", self._secret_uri + ) + + secret = self.charm.app.add_secret(content, label=self.label) + secret.grant(relation) + self._secret_uri = secret.id + self._secret_meta = secret + return self._secret_meta + + @property + def meta(self) -> Optional[Secret]: + """Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + try: + self._secret_meta = self.charm.model.get_secret(label=self.label) + except SecretNotFoundError: + if self._secret_uri: + 
self._secret_meta = self.charm.model.get_secret( + id=self._secret_uri, label=self.label + ) + return self._secret_meta + + def get_content(self) -> Dict[str, str]: + """Getting cached secret content.""" + if not self._secret_content: + if self.meta: + try: + self._secret_content = self.meta.get_content(refresh=True) + except (ValueError, ModelError) as err: + # https://bugs.launchpad.net/juju/+bug/2042596 + # Only triggered when 'refresh' is set + msg = "ERROR either URI or label should be used for getting an owned secret but not both" + if isinstance(err, ModelError) and msg not in str(err): + raise + # Due to: ValueError: Secret owner cannot use refresh=True + self._secret_content = self.meta.get_content() + return self._secret_content + + def set_content(self, content: Dict[str, str]) -> None: + """Setting cached secret content.""" + if not self.meta: + return + + if content: + self.meta.set_content(content) + self._secret_content = content + else: + self.meta.remove_all_revisions() + + def get_info(self) -> Optional[SecretInfo]: + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" + if self.meta: + return self.meta.get_info() + + +class SecretCache: + """A data structure storing CachedSecret objects.""" + + def __init__(self, charm): + self.charm = charm + self._secrets: Dict[str, CachedSecret] = {} + + def get(self, label: str, uri: Optional[str] = None) -> Optional[CachedSecret]: + """Getting a secret from Juju Secret store or cache.""" + if not self._secrets.get(label): + secret = CachedSecret(self.charm, label, uri) + if secret.meta: + self._secrets[label] = secret + return self._secrets.get(label) + + def add(self, label: str, content: Dict[str, str], relation: Relation) -> CachedSecret: + """Adding a secret to Juju Secret.""" + if self._secrets.get(label): + raise SecretAlreadyExistsError(f"Secret {label} already exists") + + secret = CachedSecret(self.charm, label) + secret.add_secret(content, 
relation) + self._secrets[label] = secret + return self._secrets[label] + + +# Base DataRelation + + +class DataRelation(Object, ABC): + """Base relation data mainpulation (abstract) class.""" def __init__(self, charm: CharmBase, relation_name: str) -> None: super().__init__(charm, relation_name) @@ -364,62 +595,612 @@ def __init__(self, charm: CharmBase, relation_name: str) -> None: self.relation_name = relation_name self.framework.observe( charm.on[relation_name].relation_changed, - self._on_relation_changed, + self._on_relation_changed_event, ) + self._jujuversion = None + self.secrets = SecretCache(self.charm) - def _diff(self, event: RelationChangedEvent) -> Diff: - """Retrieves the diff of the data in the relation changed databag. + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return [ + relation + for relation in self.charm.model.relations[self.relation_name] + if self._is_relation_active(relation) + ] - Args: - event: relation changed event. + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + if not self._jujuversion: + self._jujuversion = JujuVersion.from_environ() + return self._jujuversion.has_secrets - Returns: - a Diff instance containing the added, deleted and changed - keys from the event relation databag. 
- """ - return diff(event, self.local_app) + # Mandatory overrides for internal/helper methods @abstractmethod - def _on_relation_changed(self, event: RelationChangedEvent) -> None: + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the relation data has changed.""" raise NotImplementedError - def fetch_relation_data(self) -> dict: + @abstractmethod + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + raise NotImplementedError + + @abstractmethod + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation.""" + raise NotImplementedError + + @abstractmethod + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + # Internal helper methods + + @staticmethod + def _is_relation_active(relation: Relation): + """Whether the relation is active based on contained data.""" + try: + _ = repr(relation.data) + return True + except (RuntimeError, ModelError): + return False + + @staticmethod + def _is_secret_field(field: str) -> bool: + """Is the field in question a secret reference (URI) field or not?""" + return field.startswith(PROV_SECRET_PREFIX) + + @staticmethod + def _generate_secret_label( + relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{relation_name}.{relation_id}.{group_mapping.value}.secret" + + @staticmethod + def _generate_secret_field_name(group_mapping: SecretGroup) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{PROV_SECRET_PREFIX}{group_mapping.value}" + + def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: + """Retrieve the relation that belongs to a secret label.""" + contents = secret_label.split(".") + + if not (contents and len(contents) >= 3): + return + + contents.pop() # ".secret" at the end + contents.pop() # Group mapping + relation_id = contents.pop() + try: + relation_id = int(relation_id) + except ValueError: + return + + # In case '.' character appeared in relation name + relation_name = ".".join(contents) + + try: + return self.get_relation(relation_name, relation_id) + except ModelError: + return + + @staticmethod + def _group_secret_fields(secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! 
+ """ + secret_fieldnames_grouped = {} + for key in secret_fields: + if group := SECRET_LABEL_MAP.get(key): + secret_fieldnames_grouped.setdefault(group, []).append(key) + else: + secret_fieldnames_grouped.setdefault(SecretGroup.EXTRA, []).append(key) + return secret_fieldnames_grouped + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Optional[Union[Set[str], List[str]]] = None, + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + if not secret_fields: + secret_fields = [] + + if (secret := self._get_relation_secret(relation.id, group)) and ( + secret_data := secret.get_content() + ): + return {k: v for k, v in secret_data.items() if k in secret_fields} + return {} + + @staticmethod + def _content_for_secret_group( + content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SecretGroup.EXTRA: + return { + k: v + for k, v in content.items() + if k in secret_fields and k not in SECRET_LABEL_MAP.keys() + } + + return { + k: v + for k, v in content.items() + if k in secret_fields and SECRET_LABEL_MAP.get(k) == group_mapping + } + + @juju_secrets_only + def _get_relation_secret_data( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[Dict[str, str]]: + """Retrieve contents of a Juju Secret that's been stored in the relation databag.""" + secret = self._get_relation_secret(relation_id, group_mapping, relation_name) + if secret: + return secret.get_content() + + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) + + def _process_secret_fields( + self, + relation: Relation, + req_secret_fields: 
Optional[List[str]], + impacted_rel_fields: List[str], + operation: Callable, + *args, + **kwargs, + ) -> Tuple[Dict[str, str], Set[str]]: + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" + result = {} + + # If the relation started on a databag, we just stay on the databag + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provides) + fallback_to_databag = ( + req_secret_fields + and self.local_unit.is_leader() + and set(req_secret_fields) & set(relation.data[self.local_app]) + ) + + normal_fields = set(impacted_rel_fields) + if req_secret_fields and self.secrets_enabled and not fallback_to_databag: + normal_fields = normal_fields - set(req_secret_fields) + secret_fields = set(impacted_rel_fields) - set(normal_fields) + + secret_fieldnames_grouped = self._group_secret_fields(list(secret_fields)) + + for group in secret_fieldnames_grouped: + # operation() should return nothing when all goes well + if group_result := operation(relation, group, secret_fields, *args, **kwargs): + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) + if isinstance(group_result, dict): + result.update(group_result) + else: + # If it wasn't found as a secret, let's give it a 2nd chance as "normal" field + # Needed when Juju3 Requires meets Juju2 Provider + normal_fields |= set(secret_fieldnames_grouped[group]) + return (result, normal_fields) + + def _fetch_relation_data_without_secrets( + self, app: Application, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching databag contents when no secrets are involved. + + Since the Provider's databag is the only one holding secrest, we can apply + a simplified workflow to read the Require's side's databag. 
+ This is used typically when the Provides side wants to read the Requires side's data, + or when the Requires side may want to read its own data. + """ + if app not in relation.data or not relation.data[app]: + return {} + + if fields: + return {k: relation.data[app][k] for k in fields if k in relation.data[app]} + else: + return dict(relation.data[app]) + + def _fetch_relation_data_with_secrets( + self, + app: Application, + req_secret_fields: Optional[List[str]], + relation: Relation, + fields: Optional[List[str]] = None, + ) -> Dict[str, str]: + """Fetching databag contents when secrets may be involved. + + This function has internal logic to resolve if a requested field may be "hidden" + within a Relation Secret, or directly available as a databag field. Typically + used to read the Provides side's databag (eigher by the Requires side, or by + Provides side itself). + """ + result = {} + normal_fields = [] + + if not fields: + if app not in relation.data or not relation.data[app]: + return {} + + all_fields = list(relation.data[app].keys()) + normal_fields = [field for field in all_fields if not self._is_secret_field(field)] + + # There must have been secrets there + if all_fields != normal_fields and req_secret_fields: + # So we assemble the full fields list (without 'secret-' fields) + fields = normal_fields + req_secret_fields + + if fields: + result, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._get_group_secret_contents + ) + + # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret. 
+ # (Typically when Juju3 Requires meets Juju2 Provides) + if normal_fields: + result.update( + self._fetch_relation_data_without_secrets(app, relation, list(normal_fields)) + ) + return result + + def _update_relation_data_without_secrets( + self, app: Application, relation: Relation, data: Dict[str, str] + ) -> None: + """Updating databag contents when no secrets are involved.""" + if app not in relation.data or relation.data[app] is None: + return + + if any(self._is_secret_field(key) for key in data.keys()): + raise SecretsIllegalUpdateError("Can't update secret {key}.") + + if relation: + relation.data[app].update(data) + + def _delete_relation_data_without_secrets( + self, app: Application, relation: Relation, fields: List[str] + ) -> None: + """Remove databag fields 'fields' from Relation.""" + if app not in relation.data or not relation.data[app]: + return + + for field in fields: + try: + relation.data[app].pop(field) + except KeyError: + logger.debug( + "Non-existing field was attempted to be removed from the databag %s, %s", + str(relation.id), + str(field), + ) + pass + + # Public interface methods + # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + + def get_relation(self, relation_name, relation_id) -> Relation: + """Safe way of retrieving a relation.""" + relation = self.charm.model.get_relation(relation_name, relation_id) + + if not relation: + raise DataInterfacesError( + "Relation %s %s couldn't be retrieved", relation_name, relation_id + ) + + return relation + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: """Retrieves data from relation. This function can be used to retrieve data from a relation in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. 
Returns: a dict of the values stored in the relation data bag - for all relation instances (indexed by the relation id). + for all relation instances (indexed by the relation ID). """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + data = {} - for relation in self.relations: - data[relation.id] = { - key: value for key, value in relation.data[relation.app].items() if key != "data" - } + for relation in relations: + if not relation_ids or (relation_ids and relation.id in relation_ids): + data[relation.id] = self._fetch_specific_relation_data(relation, fields) return data - def _update_relation_data(self, relation_id: int, data: dict) -> None: - """Updates a set of key-value pairs in the relation. + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data.""" + return ( + self.fetch_relation_data([relation_id], [field], relation_name) + .get(relation_id, {}) + .get(field) + ) - This function writes in the application data bag, therefore, - only the leader unit can call it. + @leader_only + def fetch_my_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Optional[Dict[int, Dict[str, str]]]: + """Fetch data of the 'owner' (or 'this app') side of the relation. 
+ + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or relation.id in relation_ids: + data[relation.id] = self._fetch_my_specific_relation_data(relation, fields) + return data + + @leader_only + def fetch_my_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data -- owner side. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if relation_data := self.fetch_my_relation_data([relation_id], [field], relation_name): + return relation_data.get(relation_id, {}).get(field) + + @leader_only + def update_relation_data(self, relation_id: int, data: dict) -> None: + """Update the data within the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._update_relation_data(relation, data) + + @leader_only + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """Remove field from the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._delete_relation_data(relation, fields) + + +# Base DataProvides and DataRequires + + +class DataProvides(DataRelation): + """Base provides-side of the data products relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed 
databag. Args: - relation_id: the identifier for a particular relation. - data: dict containing the key-value pairs - that should be updated in the relation. + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. """ - if self.local_unit.is_leader(): - relation = self.charm.model.get_relation(self.relation_name, relation_id) - relation.data[self.local_app].update(data) + return diff(event, self.local_app) - @property - def relations(self) -> List[Relation]: - """The list of Relation instances associated with this relation_name.""" - return list(self.charm.model.relations[self.relation_name]) + # Private methods handling secrets + + @juju_secrets_only + def _add_relation_secret( + self, relation: Relation, content: Dict[str, str], group_mapping: SecretGroup + ) -> bool: + """Add a new Juju Secret that will be registered in the relation databag.""" + secret_field = self._generate_secret_field_name(group_mapping) + if relation.data[self.local_app].get(secret_field): + logging.error("Secret for relation %s already exists, not adding again", relation.id) + return False + + label = self._generate_secret_label(self.relation_name, relation.id, group_mapping) + secret = self.secrets.add(label, content, relation) + + # According to lint we may not have a Secret ID + if secret.meta and secret.meta.id: + relation.data[self.local_app][secret_field] = secret.meta.id + + # Return the content that was added + return True + + @juju_secrets_only + def _update_relation_secret( + self, relation: Relation, content: Dict[str, str], group_mapping: SecretGroup + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group_mapping) + + if not secret: + logging.error("Can't update secret for relation %s", relation.id) + return False + + old_content = secret.get_content() + full_content = 
copy.deepcopy(old_content) + full_content.update(content) + secret.set_content(full_content) + + # Return True on success + return True + + def _add_or_update_relation_secrets( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + ) -> bool: + """Update contents for Secret group. If the Secret doesn't exist, create it.""" + secret_content = self._content_for_secret_group(data, secret_fields, group) + if self._get_relation_secret(relation.id, group): + return self._update_relation_secret(relation, secret_content, group) + else: + return self._add_relation_secret(relation, secret_content, group) + + @juju_secrets_only + def _delete_relation_secret( + self, relation: Relation, group: SecretGroup, secret_fields: List[str], fields: List[str] + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group) + + if not secret: + logging.error("Can't delete secret for relation %s", str(relation.id)) + return False + + old_content = secret.get_content() + new_content = copy.deepcopy(old_content) + for field in fields: + try: + new_content.pop(field) + except KeyError: + logging.error( + "Non-existing secret was attempted to be removed %s, %s", + str(relation.id), + str(field), + ) + return False + + secret.set_content(new_content) + + # Remove secret from the relation if it's fully gone + if not new_content: + field = self._generate_secret_field_name(group) + try: + relation.data[self.local_app].pop(field) + except KeyError: + pass + + # Return the content that was removed + return True + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + 
label = self._generate_secret_label(relation_name, relation_id, group_mapping) + if secret := self.secrets.get(label): + return secret + + relation = self.charm.model.get_relation(relation_name, relation_id) + if not relation: + return + + secret_field = self._generate_secret_field_name(group_mapping) + if secret_uri := relation.data[self.local_app].get(secret_field): + return self.secrets.get(label, secret_uri) + + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching relation data for Provides. + + NOTE: Since all secret fields are in the Provides side of the databag, we don't need to worry about that + """ + if not relation.app: + return {} + + return self._fetch_relation_data_without_secrets(relation.app, relation, fields) + + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> dict: + """Fetching our own relation data.""" + secret_fields = None + if relation.app: + secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + return self._fetch_relation_data_with_secrets( + self.local_app, + secret_fields, + relation, + fields, + ) + + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Set values for fields not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, + req_secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.local_app, relation, normal_content) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete fields from the Relation not caring whether it's a secret or not.""" + req_secret_fields = [] + if 
relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.local_app, relation, list(normal_fields)) + + # Public methods - "native" def set_credentials(self, relation_id: int, username: str, password: str) -> None: """Set credentials. @@ -432,13 +1213,7 @@ def set_credentials(self, relation_id: int, username: str, password: str) -> Non username: user that was created. password: password of the created user. """ - self._update_relation_data( - relation_id, - { - "username": username, - "password": password, - }, - ) + self.update_relation_data(relation_id, {"username": username, "password": password}) def set_tls(self, relation_id: int, tls: str) -> None: """Set whether TLS is enabled. @@ -447,7 +1222,7 @@ def set_tls(self, relation_id: int, tls: str) -> None: relation_id: the identifier for a particular relation. tls: whether tls is enabled (True or False). """ - self._update_relation_data(relation_id, {"tls": tls}) + self.update_relation_data(relation_id, {"tls": tls}) def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: """Set the TLS CA in the application relation databag. @@ -456,108 +1231,98 @@ def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: relation_id: the identifier for a particular relation. tls_ca: TLS certification authority. 
""" - self._update_relation_data(relation_id, {"tls_ca": tls_ca}) + self.update_relation_data(relation_id, {"tls-ca": tls_ca}) -class DataRequires(Object, ABC): +class DataRequires(DataRelation): """Requires-side of the relation.""" + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + def __init__( self, charm, relation_name: str, - extra_user_roles: str = None, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], ): """Manager of base client relations.""" super().__init__(charm, relation_name) - self.charm = charm self.extra_user_roles = extra_user_roles - self.local_app = self.charm.model.app - self.local_unit = self.charm.unit - self.relation_name = relation_name + self._secret_fields = list(self.SECRET_FIELDS) + if additional_secret_fields: + self._secret_fields += additional_secret_fields + self.framework.observe( - self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + self.charm.on[relation_name].relation_created, self._on_relation_created_event ) self.framework.observe( - self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + charm.on.secret_changed, + self._on_secret_changed_event, ) - @abstractmethod - def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: - """Event emitted when the application joins the relation.""" - raise NotImplementedError - - @abstractmethod - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - raise NotImplementedError + @property + def secret_fields(self) -> Optional[List[str]]: + """Local access to secrets field, in case they are being used.""" + if self.secrets_enabled: + return self._secret_fields - def fetch_relation_data(self) -> dict: - """Retrieves data from relation. + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. 
- This function can be used to retrieve data from a relation - in the charm code when outside an event callback. - Function cannot be used in `*-relation-broken` events and will raise an exception. + Args: + event: relation changed event. Returns: - a dict of the values stored in the relation data bag - for all relation instances (indexed by the relation ID). + a Diff instance containing the added, deleted and changed + keys from the event relation databag. """ - data = {} - for relation in self.relations: - data[relation.id] = { - key: value for key, value in relation.data[relation.app].items() if key != "data" - } - return data + return diff(event, self.local_unit) - def _update_relation_data(self, relation_id: int, data: dict) -> None: - """Updates a set of key-value pairs in the relation. + # Internal helper functions - This function writes in the application data bag, therefore, - only the leader unit can call it. + def _register_secret_to_relation( + self, relation_name: str, relation_id: int, secret_id: str, group: SecretGroup + ): + """Fetch secrets and apply local label on them. - Args: - relation_id: the identifier for a particular relation. - data: dict containing the key-value pairs - that should be updated in the relation. + [MAGIC HERE] + If we fetch a secret using get_secret(id=, label=), + then will be "stuck" on the Secret object, whenever it may + appear (i.e. as an event attribute, or fetched manually) on future occasions. + + This will allow us to uniquely identify the secret on Provides side (typically on + 'secret-changed' events), and map it to the corresponding relation. """ - if self.local_unit.is_leader(): - relation = self.charm.model.get_relation(self.relation_name, relation_id) - relation.data[self.local_app].update(data) + label = self._generate_secret_label(relation_name, relation_id, group) - def _diff(self, event: RelationChangedEvent) -> Diff: - """Retrieves the diff of the data in the relation changed databag. 
+ # Fetching the Secret's meta information ensuring that it's locally getting registered with + CachedSecret(self.charm, label, secret_id).meta - Args: - event: relation changed event. + def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): + """Make sure that secrets of the provided list are locally 'registered' from the databag. - Returns: - a Diff instance containing the added, deleted and changed - keys from the event relation databag. + More on 'locally registered' magic is described in _register_secret_to_relation() method """ - return diff(event, self.local_unit) + if not relation.app: + return - @property - def relations(self) -> List[Relation]: - """The list of Relation instances associated with this relation_name.""" - return [ - relation - for relation in self.charm.model.relations[self.relation_name] - if self._is_relation_active(relation) - ] + for group in SecretGroup: + secret_field = self._generate_secret_field_name(group) + if secret_field in params_name_list: + if secret_uri := relation.data[relation.app].get(secret_field): + self._register_secret_to_relation( + relation.name, relation.id, secret_uri, group + ) - @staticmethod - def _is_relation_active(relation: Relation): - try: - _ = repr(relation.data) - return True - except RuntimeError: + def _is_resource_created_for_relation(self, relation: Relation) -> bool: + if not relation.app: return False - @staticmethod - def _is_resource_created_for_relation(relation: Relation): - return ( - "username" in relation.data[relation.app] and "password" in relation.data[relation.app] + data = self.fetch_relation_data([relation.id], ["username", "password"]).get( + relation.id, {} ) + return bool(data.get("username")) and bool(data.get("password")) def is_resource_created(self, relation_id: Optional[int] = None) -> bool: """Check if the resource has been created. 
@@ -586,15 +1351,81 @@ def is_resource_created(self, relation_id: Optional[int] = None) -> bool: else: return ( all( - [ - self._is_resource_created_for_relation(relation) - for relation in self.relations - ] + self._is_resource_created_for_relation(relation) for relation in self.relations ) if self.relations else False ) + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.local_unit.is_leader(): + return + + if self.secret_fields: + set_encoded_field( + event.relation, self.charm.app, REQ_SECRET_FIELDS, self.secret_fields + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group) + return self.secrets.get(label) + + def _fetch_specific_relation_data( + self, relation, fields: Optional[List[str]] = None + ) -> Dict[str, str]: + """Fetching Requires data -- that may include secrets.""" + if not relation.app: + return {} + return self._fetch_relation_data_with_secrets( + relation.app, self.secret_fields, relation, fields + ) + + def _fetch_my_specific_relation_data(self, relation, fields: Optional[List[str]]) -> dict: + """Fetching our own relation data.""" + return self._fetch_relation_data_without_secrets(self.local_app, relation, fields) + + def _update_relation_data(self, relation: Relation, data: dict) -> None: + """Updates a set of key-value pairs in the relation. 
+ + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + return self._update_relation_data_without_secrets(self.local_app, relation, data) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Deletes a set of fields from the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + fields: list containing the field names that should be removed from the relation. + """ + return self._delete_relation_data_without_secrets(self.local_app, relation, fields) + # General events @@ -605,30 +1436,108 @@ class ExtraRoleEvent(RelationEvent): @property def extra_user_roles(self) -> Optional[str]: """Returns the extra user roles that were requested.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("extra-user-roles") class AuthenticationEvent(RelationEvent): - """Base class for authentication fields for events.""" + """Base class for authentication fields for events. + + The amount of logic added here is not ideal -- but this was the only way to preserve + the interface when moving to Juju Secrets + """ + + @property + def _secrets(self) -> dict: + """Caching secrets to avoid fetching them each time a field is referred. + + DON'T USE the encapsulated helper variable outside of this function + """ + if not hasattr(self, "_cached_secrets"): + self._cached_secrets = {} + return self._cached_secrets + + @property + def _jujuversion(self) -> JujuVersion: + """Caching jujuversion to avoid a Juju call on each field evaluation. 
+ + DON'T USE the encapsulated helper variable outside of this function + """ + if not hasattr(self, "_cached_jujuversion"): + self._cached_jujuversion = None + if not self._cached_jujuversion: + self._cached_jujuversion = JujuVersion.from_environ() + return self._cached_jujuversion + + def _get_secret(self, group) -> Optional[Dict[str, str]]: + """Retrieving secrets.""" + if not self.app: + return + if not self._secrets.get(group): + self._secrets[group] = None + secret_field = f"{PROV_SECRET_PREFIX}{group}" + if secret_uri := self.relation.data[self.app].get(secret_field): + secret = self.framework.model.get_secret(id=secret_uri) + self._secrets[group] = secret.get_content() + return self._secrets[group] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + return self._jujuversion.has_secrets @property def username(self) -> Optional[str]: """Returns the created username.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("username") + return self.relation.data[self.relation.app].get("username") @property def password(self) -> Optional[str]: """Returns the password for the created user.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("password") + return self.relation.data[self.relation.app].get("password") @property def tls(self) -> Optional[str]: """Returns whether TLS is configured.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls") + return self.relation.data[self.relation.app].get("tls") @property def tls_ca(self) -> Optional[str]: """Returns TLS CA.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls-ca") + return 
self.relation.data[self.relation.app].get("tls-ca") @@ -641,6 +1550,9 @@ class DatabaseProvidesEvent(RelationEvent): @property def database(self) -> Optional[str]: """Returns the database that was requested.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("database") @@ -663,16 +1575,33 @@ class DatabaseRequiresEvent(RelationEvent): @property def database(self) -> Optional[str]: """Returns the database name.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("database") @property def endpoints(self) -> Optional[str]: - """Returns a comma separated list of read/write endpoints.""" + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("endpoints") @property def read_only_endpoints(self) -> Optional[str]: - """Returns a comma separated list of read only endpoints.""" + """Returns a comma separated list of read only endpoints. + + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("read-only-endpoints") @property @@ -681,6 +1610,9 @@ def replset(self) -> Optional[str]: MongoDB only. """ + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("replset") @property @@ -689,6 +1621,9 @@ def uris(self) -> Optional[str]: MongoDB, Redis, OpenSearch. """ + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("uris") @property @@ -697,6 +1632,9 @@ def version(self) -> Optional[str]: Version as informed by the database daemon. 
""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("version") @@ -729,24 +1667,25 @@ class DatabaseRequiresEvents(CharmEvents): class DatabaseProvides(DataProvides): """Provider-side of the database relations.""" - on = DatabaseProvidesEvents() + on = DatabaseProvidesEvents() # pyright: ignore [reportGeneralTypeIssues] def __init__(self, charm: CharmBase, relation_name: str) -> None: super().__init__(charm, relation_name) - def _on_relation_changed(self, event: RelationChangedEvent) -> None: + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the relation has changed.""" - # Only the leader should handle this event. + # Leader only if not self.local_unit.is_leader(): return - # Check which data has changed to emit customs events. diff = self._diff(event) # Emit a database requested event if the setup key (database name and optional # extra user roles) was added to the relation databag by the application. if "database" in diff.added: - self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit) + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) def set_database(self, relation_id: int, database_name: str) -> None: """Set database name. @@ -758,7 +1697,7 @@ def set_database(self, relation_id: int, database_name: str) -> None: relation_id: the identifier for a particular relation. database_name: database name. """ - self._update_relation_data(relation_id, {"database": database_name}) + self.update_relation_data(relation_id, {"database": database_name}) def set_endpoints(self, relation_id: int, connection_strings: str) -> None: """Set database primary connections. @@ -766,11 +1705,15 @@ def set_endpoints(self, relation_id: int, connection_strings: str) -> None: This function writes in the application data bag, therefore, only the leader unit can call it. 
+ In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + Args: relation_id: the identifier for a particular relation. connection_strings: database hosts and ports comma separated list. """ - self._update_relation_data(relation_id, {"endpoints": connection_strings}) + self.update_relation_data(relation_id, {"endpoints": connection_strings}) def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: """Set database replicas connection strings. @@ -782,7 +1725,7 @@ def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> relation_id: the identifier for a particular relation. connection_strings: database hosts and ports comma separated list. """ - self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + self.update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) def set_replset(self, relation_id: int, replset: str) -> None: """Set replica set name in the application relation databag. @@ -793,7 +1736,7 @@ def set_replset(self, relation_id: int, replset: str) -> None: relation_id: the identifier for a particular relation. replset: replica set name. """ - self._update_relation_data(relation_id, {"replset": replset}) + self.update_relation_data(relation_id, {"replset": replset}) def set_uris(self, relation_id: int, uris: str) -> None: """Set the database connection URIs in the application relation databag. @@ -804,7 +1747,7 @@ def set_uris(self, relation_id: int, uris: str) -> None: relation_id: the identifier for a particular relation. uris: connection URIs. """ - self._update_relation_data(relation_id, {"uris": uris}) + self.update_relation_data(relation_id, {"uris": uris}) def set_version(self, relation_id: int, version: str) -> None: """Set the database version in the application relation databag. 
@@ -813,24 +1756,25 @@ def set_version(self, relation_id: int, version: str) -> None: relation_id: the identifier for a particular relation. version: database version. """ - self._update_relation_data(relation_id, {"version": version}) + self.update_relation_data(relation_id, {"version": version}) class DatabaseRequires(DataRequires): """Requires-side of the database relation.""" - on = DatabaseRequiresEvents() + on = DatabaseRequiresEvents() # pyright: ignore [reportGeneralTypeIssues] def __init__( self, charm, relation_name: str, database_name: str, - extra_user_roles: str = None, - relations_aliases: List[str] = None, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], ): """Manager of database client relations.""" - super().__init__(charm, relation_name, extra_user_roles) + super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) self.database = database_name self.relations_aliases = relations_aliases @@ -855,6 +1799,10 @@ def __init__( DatabaseReadOnlyEndpointsChangedEvent, ) + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + def _assign_relation_alias(self, relation_id: int) -> None: """Assigns an alias to a relation. @@ -869,11 +1817,8 @@ def _assign_relation_alias(self, relation_id: int) -> None: # Return if an alias was already assigned to this relation # (like when there are more than one unit joining the relation). - if ( - self.charm.model.get_relation(self.relation_name, relation_id) - .data[self.local_unit] - .get("alias") - ): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + if relation and relation.data[self.local_unit].get("alias"): return # Retrieve the available aliases (the ones that weren't assigned to any relation). 
@@ -886,7 +1831,13 @@ def _assign_relation_alias(self, relation_id: int) -> None: # Set the alias in the unit relation databag of the specific relation. relation = self.charm.model.get_relation(self.relation_name, relation_id) - relation.data[self.local_unit].update({"alias": available_aliases[0]}) + if relation: + relation.data[self.local_unit].update({"alias": available_aliases[0]}) + + # We need to set relation alias also on the application level so, + # it will be accessible in show-unit juju command, executed for a consumer application unit + if self.local_unit.is_leader(): + self.update_relation_data(relation_id, {"alias": available_aliases[0]}) def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: """Emit an aliased event to a particular relation if it has an alias. @@ -915,15 +1866,69 @@ def _get_relation_alias(self, relation_id: int) -> Optional[str]: return relation.data[self.local_unit].get("alias") return None - def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: - """Event emitted when the application joins the database relation.""" + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. + import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. 
+ if host is None: + return False + + host = host.split(":")[0] + + content = self.fetch_relation_data([relation_id], ["username", "password"]).get( + relation_id, {} + ) + user = content.get("username") + password = content.get("password") + + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute( + "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) + ) + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the database relation is created.""" + super()._on_relation_created_event(event) + # If relations aliases were provided, assign one to the relation. self._assign_relation_alias(event.relation.id) # Sets both database and extra user roles in the relation # if the roles are provided. Otherwise, sets only the database. + if not self.local_unit.is_leader(): + return + if self.extra_user_roles: - self._update_relation_data( + self.update_relation_data( event.relation.id, { "database": self.database, @@ -931,19 +1936,28 @@ def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: }, ) else: - self._update_relation_data(event.relation.id, {"database": self.database}) + self.update_relation_data(event.relation.id, {"database": self.database}) def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the database relation has changed.""" # Check which data has changed to emit customs events. 
diff = self._diff(event) + # Register all new secrets with their labels + if any(newval for newval in diff.added if self._is_secret_field(newval)): + self._register_secrets_to_relation(event.relation, diff.added) + # Check if the database is created # (the database charm shared the credentials). - if "username" in diff.added and "password" in diff.added: + secret_field_user = self._generate_secret_field_name(SecretGroup.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: # Emit the default event (the one without an alias). logger.info("database created at %s", datetime.now()) - self.on.database_created.emit(event.relation, app=event.app, unit=event.unit) + getattr(self.on, "database_created").emit( + event.relation, app=event.app, unit=event.unit + ) # Emit the aliased event (if any). self._emit_aliased_event(event, "database_created") @@ -957,7 +1971,9 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: if "endpoints" in diff.added or "endpoints" in diff.changed: # Emit the default event (the one without an alias). logger.info("endpoints changed on %s", datetime.now()) - self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # Emit the aliased event (if any). self._emit_aliased_event(event, "endpoints_changed") @@ -971,7 +1987,7 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: # Emit the default event (the one without an alias). 
logger.info("read-only-endpoints changed on %s", datetime.now()) - self.on.read_only_endpoints_changed.emit( + getattr(self.on, "read_only_endpoints_changed").emit( event.relation, app=event.app, unit=event.unit ) @@ -988,11 +2004,17 @@ class KafkaProvidesEvent(RelationEvent): @property def topic(self) -> Optional[str]: """Returns the topic that was requested.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("topic") @property def consumer_group_prefix(self) -> Optional[str]: """Returns the consumer-group-prefix that was requested.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("consumer-group-prefix") @@ -1015,21 +2037,33 @@ class KafkaRequiresEvent(RelationEvent): @property def topic(self) -> Optional[str]: """Returns the topic.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("topic") @property def bootstrap_server(self) -> Optional[str]: - """Returns a a comma-seperated list of broker uris.""" + """Returns a comma-separated list of broker uris.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("endpoints") @property def consumer_group_prefix(self) -> Optional[str]: """Returns the consumer-group-prefix.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("consumer-group-prefix") @property def zookeeper_uris(self) -> Optional[str]: """Returns a comma separated list of Zookeeper uris.""" + if not self.relation.app: + return None + return self.relation.data[self.relation.app].get("zookeeper-uris") @@ -1057,14 +2091,14 @@ class KafkaRequiresEvents(CharmEvents): class KafkaProvides(DataProvides): """Provider-side of the Kafka relation.""" - on = KafkaProvidesEvents() + on = KafkaProvidesEvents() # pyright: ignore [reportGeneralTypeIssues] def __init__(self, charm: CharmBase, relation_name: str) -> None: super().__init__(charm, 
relation_name) - def _on_relation_changed(self, event: RelationChangedEvent) -> None: + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the relation has changed.""" - # Only the leader should handle this event. + # Leader only if not self.local_unit.is_leader(): return @@ -1074,7 +2108,9 @@ def _on_relation_changed(self, event: RelationChangedEvent) -> None: # Emit a topic requested event if the setup key (topic name and optional # extra user roles) was added to the relation databag by the application. if "topic" in diff.added: - self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit) + getattr(self.on, "topic_requested").emit( + event.relation, app=event.app, unit=event.unit + ) def set_topic(self, relation_id: int, topic: str) -> None: """Set topic name in the application relation databag. @@ -1083,7 +2119,7 @@ def set_topic(self, relation_id: int, topic: str) -> None: relation_id: the identifier for a particular relation. topic: the topic name. """ - self._update_relation_data(relation_id, {"topic": topic}) + self.update_relation_data(relation_id, {"topic": topic}) def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: """Set the bootstrap server in the application relation databag. @@ -1092,7 +2128,7 @@ def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: relation_id: the identifier for a particular relation. bootstrap_server: the bootstrap server address. """ - self._update_relation_data(relation_id, {"endpoints": bootstrap_server}) + self.update_relation_data(relation_id, {"endpoints": bootstrap_server}) def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: """Set the consumer group prefix in the application relation databag. @@ -1101,22 +2137,22 @@ def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str relation_id: the identifier for a particular relation. 
consumer_group_prefix: the consumer group prefix string. """ - self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + self.update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: """Set the zookeeper uris in the application relation databag. Args: relation_id: the identifier for a particular relation. - zookeeper_uris: comma-seperated list of ZooKeeper server uris. + zookeeper_uris: comma-separated list of ZooKeeper server uris. """ - self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) class KafkaRequires(DataRequires): """Requires-side of the Kafka relation.""" - on = KafkaRequiresEvents() + on = KafkaRequiresEvents() # pyright: ignore [reportGeneralTypeIssues] def __init__( self, @@ -1125,23 +2161,45 @@ def __init__( topic: str, extra_user_roles: Optional[str] = None, consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], ): """Manager of Kafka client relations.""" # super().__init__(charm, relation_name) - super().__init__(charm, relation_name, extra_user_roles) + super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) self.charm = charm self.topic = topic self.consumer_group_prefix = consumer_group_prefix or "" - def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: - """Event emitted when the application joins the Kafka relation.""" + @property + def topic(self): + """Topic to use in Kafka.""" + return self._topic + + @topic.setter + def topic(self, value): + # Avoid wildcards + if value == "*": + raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") + self._topic = value + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the Kafka relation is created.""" + 
super()._on_relation_created_event(event) + + if not self.local_unit.is_leader(): + return + # Sets topic, extra user roles, and "consumer-group-prefix" in the relation relation_data = { f: getattr(self, f.replace("-", "_"), "") for f in ["consumer-group-prefix", "extra-user-roles", "topic"] } - self._update_relation_data(event.relation.id, relation_data) + self.update_relation_data(event.relation.id, relation_data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the Kafka relation has changed.""" @@ -1150,21 +2208,235 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: # Check if the topic is created # (the Kafka charm shared the credentials). - if "username" in diff.added and "password" in diff.added: + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self._is_secret_field(newval)): + self._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self._generate_secret_field_name(SecretGroup.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: # Emit the default event (the one without an alias). logger.info("topic created at %s", datetime.now()) - self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit) + getattr(self.on, "topic_created").emit(event.relation, app=event.app, unit=event.unit) # To avoid unnecessary application restarts do not trigger # “endpoints_changed“ event if “topic_created“ is triggered. return - # Emit an endpoints (bootstap-server) changed event if the Kafka endpoints + # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints # added or changed this info in the relation databag. 
if "endpoints" in diff.added or "endpoints" in diff.changed: # Emit the default event (the one without an alias). logger.info("endpoints changed on %s", datetime.now()) - self.on.bootstrap_server_changed.emit( + getattr(self.on, "bootstrap_server_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +# Opensearch related events + + +class OpenSearchProvidesEvent(RelationEvent): + """Base class for OpenSearch events.""" + + @property + def index(self) -> Optional[str]: + """Returns the index that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("index") + + +class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent): + """Event emitted when a new index is requested for use on this relation.""" + + +class OpenSearchProvidesEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that OpenSearch can emit. + """ + + index_requested = EventSource(IndexRequestedEvent) + + +class OpenSearchRequiresEvent(DatabaseRequiresEvent): + """Base class for OpenSearch requirer events.""" + + +class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent): + """Event emitted when a new index is created for use on this relation.""" + + +class OpenSearchRequiresEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that the opensearch requirer can emit. 
+ """ + + index_created = EventSource(IndexCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + authentication_updated = EventSource(AuthenticationEvent) + + +# OpenSearch Provides and Requires Objects + + +class OpenSearchProvides(DataProvides): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportGeneralTypeIssues] + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + super().__init__(charm, relation_name) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + def set_index(self, relation_id: int, index: str) -> None: + """Set the index in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + index: the index as it is _created_ on the provider charm. This needn't match the + requested index, and can be used to present a different index name if, for example, + the requested index is invalid. + """ + self.update_relation_data(relation_id, {"index": index}) + + def set_endpoints(self, relation_id: int, endpoints: str) -> None: + """Set the endpoints in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + endpoints: the endpoint addresses for opensearch nodes. + """ + self.update_relation_data(relation_id, {"endpoints": endpoints}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the opensearch version in the application relation databag. 
+ + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + +class OpenSearchRequires(DataRequires): + """Requires-side of the OpenSearch relation.""" + + on = OpenSearchRequiresEvents() # pyright: ignore[reportGeneralTypeIssues] + + def __init__( + self, + charm, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of OpenSearch client relations.""" + super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) + self.charm = charm + self.index = index + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the OpenSearch relation is created.""" + super()._on_relation_created_event(event) + + if not self.local_unit.is_leader(): + return + + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. 
+ data = {"index": self.index} + if self.extra_user_roles: + data["extra-user-roles"] = self.extra_user_roles + + self.update_relation_data(event.relation.id, data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + logger.info("authentication updated") + getattr(self.on, "authentication_updated").emit( + relation, app=relation.app, unit=remote_unit + ) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self._is_secret_field(newval)): + self._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self._generate_secret_field_name(SecretGroup.USER) + secret_field_tls = self._generate_secret_field_name(SecretGroup.TLS) + updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + getattr(self.on, "authentication_updated").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). 
+ if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + getattr(self.on, "index_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “index_created“ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( event.relation, app=event.app, unit=event.unit ) # here check if this is the right design return diff --git a/lib/charms/zookeeper/v0/client.py b/lib/charms/zookeeper/v0/client.py index e7f03cc5..f65c6910 100644 --- a/lib/charms/zookeeper/v0/client.py +++ b/lib/charms/zookeeper/v0/client.py @@ -27,7 +27,7 @@ ```python def update_cluster(new_members: List[str], event: EventBase) -> None: - + try: zk = ZooKeeperManager( hosts=["10.141.73.20", "10.141.73.21"], @@ -35,12 +35,12 @@ def update_cluster(new_members: List[str], event: EventBase) -> None: username="super", password="password" ) - + current_quorum_members = zk.server_members servers_to_remove = list(current_quorum_members - new_members) zk.remove_members(servers_to_remove) - + servers_to_add = sorted(new_members - current_quorum_members) zk.add_members(servers_to_add) diff --git a/metadata.yaml b/metadata.yaml index e0c4e4a1..1e412da9 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -26,6 +26,7 @@ peers: requires: zookeeper: interface: zookeeper + limit: 1 certificates: interface: tls-certificates limit: 1 diff --git a/poetry.lock b/poetry.lock index c41470d1..c3ce41d8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,9 +1,10 @@ -# This file is 
automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. [[package]] name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" +category = "dev" optional = false python-versions = "*" files = [ @@ -15,6 +16,7 @@ files = [ name = "asttokens" version = "2.4.1" description = "Annotate AST trees with source code positions" +category = "dev" optional = false python-versions = "*" files = [ @@ -31,26 +33,29 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] [[package]] name = "attrs" -version = "23.1.0" +version = "23.2.0" description = "Classes Without Boilerplate" +category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, - {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, ] [package.extras] cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]", "pre-commit"] +dev = ["attrs[tests]", "pre-commit"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] [[package]] name = "backcall" version = "0.2.0" description = "Specifications for 
callback functions passed in to an API" +category = "dev" optional = false python-versions = "*" files = [ @@ -60,27 +65,39 @@ files = [ [[package]] name = "bcrypt" -version = "4.1.1" +version = "4.1.2" description = "Modern password hashing for your software and your servers" +category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "bcrypt-4.1.1-cp37-abi3-macosx_13_0_universal2.whl", hash = "sha256:2e197534c884336f9020c1f3a8efbaab0aa96fc798068cb2da9c671818b7fbb0"}, - {file = "bcrypt-4.1.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d573885b637815a7f3a3cd5f87724d7d0822da64b0ab0aa7f7c78bae534e86dc"}, - {file = "bcrypt-4.1.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bab33473f973e8058d1b2df8d6e095d237c49fbf7a02b527541a86a5d1dc4444"}, - {file = "bcrypt-4.1.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fb931cd004a7ad36a89789caf18a54c20287ec1cd62161265344b9c4554fdb2e"}, - {file = "bcrypt-4.1.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:12f40f78dcba4aa7d1354d35acf45fae9488862a4fb695c7eeda5ace6aae273f"}, - {file = "bcrypt-4.1.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2ade10e8613a3b8446214846d3ddbd56cfe9205a7d64742f0b75458c868f7492"}, - {file = "bcrypt-4.1.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f33b385c3e80b5a26b3a5e148e6165f873c1c202423570fdf45fe34e00e5f3e5"}, - {file = "bcrypt-4.1.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:755b9d27abcab678e0b8fb4d0abdebeea1f68dd1183b3f518bad8d31fa77d8be"}, - {file = "bcrypt-4.1.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a7a7b8a87e51e5e8ca85b9fdaf3a5dc7aaf123365a09be7a27883d54b9a0c403"}, - {file = "bcrypt-4.1.1-cp37-abi3-win32.whl", hash = "sha256:3d6c4e0d6963c52f8142cdea428e875042e7ce8c84812d8e5507bd1e42534e07"}, - {file = "bcrypt-4.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:14d41933510717f98aac63378b7956bbe548986e435df173c841d7f2bd0b2de7"}, - {file = 
"bcrypt-4.1.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:24c2ebd287b5b11016f31d506ca1052d068c3f9dc817160628504690376ff050"}, - {file = "bcrypt-4.1.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:476aa8e8aca554260159d4c7a97d6be529c8e177dbc1d443cb6b471e24e82c74"}, - {file = "bcrypt-4.1.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:12611c4b0a8b1c461646228344784a1089bc0c49975680a2f54f516e71e9b79e"}, - {file = "bcrypt-4.1.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c6450538a0fc32fb7ce4c6d511448c54c4ff7640b2ed81badf9898dcb9e5b737"}, - {file = "bcrypt-4.1.1.tar.gz", hash = "sha256:df37f5418d4f1cdcff845f60e747a015389fa4e63703c918330865e06ad80007"}, + {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"}, + {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"}, + {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"}, + {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, + {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, + {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, + {file = 
"bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, + {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, + {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, ] [package.extras] @@ -91,6 +108,7 @@ typecheck = ["mypy"] name = "black" version = "22.12.0" description = "The uncompromising code formatter." +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -126,6 +144,7 @@ uvloop = ["uvloop (>=0.15.2)"] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -137,6 +156,7 @@ files = [ name = "certifi" version = "2023.11.17" description = "Python package for providing Mozilla's CA Bundle." +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -148,6 +168,7 @@ files = [ name = "cffi" version = "1.16.0" description = "Foreign Function Interface for Python calling C code." +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -212,6 +233,7 @@ pycparser = "*" name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -311,6 +333,7 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -325,6 +348,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "codespell" version = "2.2.6" description = "Codespell" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -342,6 +366,7 @@ types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." +category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -353,6 +378,7 @@ files = [ name = "cosl" version = "0.0.7" description = "Utils for COS Lite charms" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -367,63 +393,64 @@ typing-extensions = "*" [[package]] name = "coverage" -version = "7.3.2" +version = "7.4.0" description = "Code coverage measurement for Python" +category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"}, - {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"}, - {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"}, - {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"}, - {file = 
"coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"}, - {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"}, - {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"}, - {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"}, - {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", 
hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"}, - {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"}, - {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"}, - {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"}, - {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"}, - {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"}, - {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"}, - {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"}, - {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"}, - {file = 
"coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"}, - {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"}, - {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"}, - {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"}, - {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"}, - {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"}, - {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"}, - {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"}, - {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"}, - {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"}, - {file = 
"coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"}, - {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"}, - {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"}, - {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"}, - {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"}, - {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"}, - {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"}, + {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, + {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, + {file = 
"coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, + {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, + {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, + {file = 
"coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, + {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, + {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, + {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, + {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, + {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, + {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, + {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, + {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, + {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, + {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, + {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, + {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, ] [package.dependencies] @@ -434,53 +461,64 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "39.0.2" +version = "42.0.0" description = "cryptography is a package which provides cryptographic recipes and primitives to Python 
developers." +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"}, - {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"}, - {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"}, - {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"}, - {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"}, - {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"}, 
- {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"}, - {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"}, + {file = "cryptography-42.0.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:c640b0ef54138fde761ec99a6c7dc4ce05e80420262c20fa239e694ca371d434"}, + {file = "cryptography-42.0.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = 
"sha256:678cfa0d1e72ef41d48993a7be75a76b0725d29b820ff3cfd606a5b2b33fda01"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:146e971e92a6dd042214b537a726c9750496128453146ab0ee8971a0299dc9bd"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87086eae86a700307b544625e3ba11cc600c3c0ef8ab97b0fda0705d6db3d4e3"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0a68bfcf57a6887818307600c3c0ebc3f62fbb6ccad2240aa21887cda1f8df1b"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5a217bca51f3b91971400890905a9323ad805838ca3fa1e202a01844f485ee87"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ca20550bb590db16223eb9ccc5852335b48b8f597e2f6f0878bbfd9e7314eb17"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:33588310b5c886dfb87dba5f013b8d27df7ffd31dc753775342a1e5ab139e59d"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9515ea7f596c8092fdc9902627e51b23a75daa2c7815ed5aa8cf4f07469212ec"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:35cf6ed4c38f054478a9df14f03c1169bb14bd98f0b1705751079b25e1cb58bc"}, + {file = "cryptography-42.0.0-cp37-abi3-win32.whl", hash = "sha256:8814722cffcfd1fbd91edd9f3451b88a8f26a5fd41b28c1c9193949d1c689dc4"}, + {file = "cryptography-42.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:a2a8d873667e4fd2f34aedab02ba500b824692c6542e017075a2efc38f60a4c0"}, + {file = "cryptography-42.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:8fedec73d590fd30c4e3f0d0f4bc961aeca8390c72f3eaa1a0874d180e868ddf"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be41b0c7366e5549265adf2145135dca107718fa44b6e418dc7499cfff6b4689"}, + {file = 
"cryptography-42.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca482ea80626048975360c8e62be3ceb0f11803180b73163acd24bf014133a0"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c58115384bdcfe9c7f644c72f10f6f42bed7cf59f7b52fe1bf7ae0a622b3a139"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:56ce0c106d5c3fec1038c3cca3d55ac320a5be1b44bf15116732d0bc716979a2"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:324721d93b998cb7367f1e6897370644751e5580ff9b370c0a50dc60a2003513"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d97aae66b7de41cdf5b12087b5509e4e9805ed6f562406dfcf60e8481a9a28f8"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:85f759ed59ffd1d0baad296e72780aa62ff8a71f94dc1ab340386a1207d0ea81"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:206aaf42e031b93f86ad60f9f5d9da1b09164f25488238ac1dc488334eb5e221"}, + {file = "cryptography-42.0.0-cp39-abi3-win32.whl", hash = "sha256:74f18a4c8ca04134d2052a140322002fef535c99cdbc2a6afc18a8024d5c9d5b"}, + {file = "cryptography-42.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:14e4b909373bc5bf1095311fa0f7fcabf2d1a160ca13f1e9e467be1ac4cbdf94"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3005166a39b70c8b94455fdbe78d87a444da31ff70de3331cdec2c568cf25b7e"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:be14b31eb3a293fc6e6aa2807c8a3224c71426f7c4e3639ccf1a2f3ffd6df8c3"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bd7cf7a8d9f34cc67220f1195884151426ce616fdc8285df9054bfa10135925f"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c310767268d88803b653fffe6d6f2f17bb9d49ffceb8d70aed50ad45ea49ab08"}, + {file = 
"cryptography-42.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bdce70e562c69bb089523e75ef1d9625b7417c6297a76ac27b1b8b1eb51b7d0f"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e9326ca78111e4c645f7e49cbce4ed2f3f85e17b61a563328c85a5208cf34440"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:69fd009a325cad6fbfd5b04c711a4da563c6c4854fc4c9544bff3088387c77c0"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:988b738f56c665366b1e4bfd9045c3efae89ee366ca3839cd5af53eaa1401bce"}, + {file = "cryptography-42.0.0.tar.gz", hash = "sha256:6cf9b76d6e93c62114bd19485e5cb003115c134cf9ce91f8ac924c44f8c8c3f4"}, ] [package.dependencies] -cffi = ">=1.12" +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] -sdist = ["setuptools-rust (>=0.11.4)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -tox = ["tox"] [[package]] name = "decorator" version = "5.1.1" description = "Decorators for Humans" +category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -492,6 +530,7 @@ files = [ name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" 
+category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -506,6 +545,7 @@ test = ["pytest (>=6)"] name = "executing" version = "2.0.1" description = "Get the currently executing AST node of a frame, and other information" +category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -518,13 +558,14 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "google-auth" -version = "2.25.1" +version = "2.26.2" description = "Google Authentication Library" +category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.25.1.tar.gz", hash = "sha256:d5d66b8f4f6e3273740d7bb73ddefa6c2d1ff691704bd407d51c6b5800e7c97b"}, - {file = "google_auth-2.25.1-py2.py3-none-any.whl", hash = "sha256:dfd7b44935d498e106c08883b2dac0ad36d8aa10402a6412e9a1c9d74b4773f1"}, + {file = "google-auth-2.26.2.tar.gz", hash = "sha256:97327dbbf58cccb58fc5a1712bba403ae76668e64814eb30f7316f7e27126b81"}, + {file = "google_auth-2.26.2-py2.py3-none-any.whl", hash = "sha256:3f445c8ce9b61ed6459aad86d8ccdba4a9afed841b2d1451a11ef4db08957424"}, ] [package.dependencies] @@ -541,13 +582,14 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "hvac" -version = "2.0.0" +version = "2.1.0" description = "HashiCorp Vault API client" +category = "dev" optional = false python-versions = ">=3.8,<4.0" files = [ - {file = "hvac-2.0.0-py3-none-any.whl", hash = "sha256:3b14d0979b98ea993eca73b7dac7161b5547ede369a9b28f4fa40f18e74ec3f3"}, - {file = "hvac-2.0.0.tar.gz", hash = "sha256:6a51cb9a0d22fe13e824cb0b0a1ce2eeacb9ce6af68b7d1b6689e25ec1becaf5"}, + {file = "hvac-2.1.0-py3-none-any.whl", hash = "sha256:73bc91e58c3fc7c6b8107cdaca9cb71fa0a893dfd80ffbc1c14e20f24c0c29d7"}, + {file = "hvac-2.1.0.tar.gz", hash = "sha256:b48bcda11a4ab0a7b6c47232c7ba7c87fda318ae2d4a7662800c465a78742894"}, ] [package.dependencies] @@ -560,6 +602,7 @@ parser = ["pyhcl (>=0.4.4,<0.5.0)"] name = "idna" version = "3.6" description = 
"Internationalized Domain Names in Applications (IDNA)" +category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -571,6 +614,7 @@ files = [ name = "importlib-resources" version = "6.1.1" description = "Read resources from Python packages" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -589,6 +633,7 @@ testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -600,6 +645,7 @@ files = [ name = "ipdb" version = "0.13.13" description = "IPython-enabled pdb" +category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -616,6 +662,7 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < name = "ipython" version = "8.12.3" description = "IPython: Productive Interactive Computing" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -655,6 +702,7 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pa name = "jedi" version = "0.19.1" description = "An autocompletion tool for Python that can be used for text editors." +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -672,13 +720,14 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.3" description = "A very fast and expressive template engine." 
+category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [package.dependencies] @@ -689,13 +738,14 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.20.0" +version = "4.21.1" description = "An implementation of JSON Schema validation for Python" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"}, - {file = "jsonschema-4.20.0.tar.gz", hash = "sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa"}, + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, ] [package.dependencies] @@ -712,13 +762,14 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- [[package]] name = "jsonschema-specifications" -version = "2023.11.2" +version = "2023.12.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"}, - {file = "jsonschema_specifications-2023.11.2.tar.gz", hash = 
"sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8"}, + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, ] [package.dependencies] @@ -729,6 +780,7 @@ referencing = ">=0.31.0" name = "juju" version = "3.3.0.0" description = "Python library for Juju" +category = "dev" optional = false python-versions = "*" files = [ @@ -755,6 +807,7 @@ websockets = [ name = "kafka-python" version = "2.0.2" description = "Pure Python client for Apache Kafka" +category = "dev" optional = false python-versions = "*" files = [ @@ -769,6 +822,7 @@ crc32c = ["crc32c"] name = "kazoo" version = "2.9.0" description = "\"Higher Level Zookeeper Client\"" +category = "main" optional = false python-versions = "*" files = [ @@ -790,13 +844,14 @@ test = ["eventlet (>=0.17.1)", "gevent (>=1.2)", "mock", "objgraph", "pytest", " [[package]] name = "kubernetes" -version = "28.1.0" +version = "29.0.0" description = "Kubernetes python client" +category = "dev" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d"}, - {file = "kubernetes-28.1.0.tar.gz", hash = "sha256:1468069a573430fb1cb5ad22876868f57977930f80a6749405da31cd6086a7e9"}, + {file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = "sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"}, + {file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"}, ] [package.dependencies] @@ -808,25 +863,26 @@ pyyaml = ">=5.4.1" requests = "*" requests-oauthlib = "*" six = ">=1.9.0" -urllib3 = ">=1.24.2,<2.0" -websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" +urllib3 = 
">=1.24.2" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.0 || >=0.43.0" [package.extras] adal = ["adal (>=1.0.2)"] [[package]] name = "macaroonbakery" -version = "1.3.2" +version = "1.3.4" description = "A Python library port for bakery, higher level operation to work with macaroons" +category = "dev" optional = false python-versions = "*" files = [ - {file = "macaroonbakery-1.3.2-py2.py3-none-any.whl", hash = "sha256:2960907fe9ff70c8687b17cd18dbb8f1f91ac4b944a73fd9f4e966c66cfac697"}, - {file = "macaroonbakery-1.3.2.tar.gz", hash = "sha256:bd1ab296129a031c7d6eb70cfb53d30762f731868dcd427d3ad769604a6247a9"}, + {file = "macaroonbakery-1.3.4-py2.py3-none-any.whl", hash = "sha256:1e952a189f5c1e96ef82b081b2852c770d7daa20987e2088e762dd5689fb253b"}, + {file = "macaroonbakery-1.3.4.tar.gz", hash = "sha256:41ca993a23e4f8ef2fe7723b5cd4a30c759735f1d5021e990770c8a0e0f33970"}, ] [package.dependencies] -protobuf = ">=3.0.0,<4.0" +protobuf = ">=3.20.0" pymacaroons = ">=0.12.0,<1.0" PyNaCl = ">=1.1.2,<2.0" pyRFC3339 = ">=1.0,<2.0" @@ -835,67 +891,79 @@ six = ">=1.11.0,<2.0" [[package]] name = "markupsafe" -version = "2.1.3" +version = "2.1.4" description = "Safely add untrusted strings to HTML/XML markup." 
+category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = 
"MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = 
"MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = 
"MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = 
"MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-win32.whl", hash = "sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0"}, + {file = "MarkupSafe-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-win32.whl", hash = "sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74"}, + {file = "MarkupSafe-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-win32.whl", hash = "sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475"}, + {file = "MarkupSafe-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-win32.whl", hash = 
"sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0"}, + {file = "MarkupSafe-2.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-win32.whl", hash = "sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a"}, + {file = "MarkupSafe-2.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-win32.whl", hash = "sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6"}, + {file = "MarkupSafe-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959"}, + {file = "MarkupSafe-2.1.4.tar.gz", hash = "sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f"}, ] [[package]] name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" +category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -910,6 +978,7 @@ traitlets = "*" name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
+category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -921,6 +990,7 @@ files = [ name = "nodeenv" version = "1.8.0" description = "Node.js virtual environment builder" +category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" files = [ @@ -935,6 +1005,7 @@ setuptools = "*" name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -951,6 +1022,7 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] name = "ops" version = "2.9.0" description = "The Python library behind great charms" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -959,13 +1031,14 @@ files = [ ] [package.dependencies] -PyYAML = "==6.*" -websocket-client = "==1.*" +PyYAML = ">=6.0.0,<7.0.0" +websocket-client = ">=1.0.0,<2.0.0" [[package]] name = "packaging" version = "23.2" description = "Core utilities for Python packages" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -977,6 +1050,7 @@ files = [ name = "paramiko" version = "2.12.0" description = "SSH2 protocol library" +category = "dev" optional = false python-versions = "*" files = [ @@ -1000,6 +1074,7 @@ invoke = ["invoke (>=1.3)"] name = "parso" version = "0.8.3" description = "A Python Parser" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1013,19 +1088,21 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathspec" -version = "0.11.2" +version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
+category = "dev" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, - {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] [[package]] name = "pexpect" version = "4.9.0" description = "Pexpect allows easy control of interactive console applications." +category = "dev" optional = false python-versions = "*" files = [ @@ -1040,6 +1117,7 @@ ptyprocess = ">=0.5" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" +category = "dev" optional = false python-versions = "*" files = [ @@ -1051,6 +1129,7 @@ files = [ name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." +category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1062,6 +1141,7 @@ files = [ name = "platformdirs" version = "4.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1075,13 +1155,14 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" +category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -1090,13 +1171,14 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "prompt-toolkit" -version = "3.0.41" +version = "3.0.43" description = "Library for building powerful interactive command lines in Python" +category = "dev" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.41-py3-none-any.whl", hash = "sha256:f36fe301fafb7470e86aaf90f036eef600a3210be4decf461a5b1ca8403d3cb2"}, - {file = "prompt_toolkit-3.0.41.tar.gz", hash = "sha256:941367d97fc815548822aa26c2a269fdc4eb21e9ec05fc5d447cf09bad5d75f0"}, + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, ] [package.dependencies] @@ -1104,39 +1186,30 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "3.20.3" -description = "Protocol Buffers" +version = "4.25.2" +description = "" +category = "dev" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = 
"protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, - {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, - {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, - {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, - {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, - {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, - {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, - {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, - {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, - {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, - {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, - {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, - {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, - {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = 
"sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, - {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, - {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, - {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, - {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, - {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, - {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, - {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, - {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, + {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = "sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"}, + {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"}, + {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"}, + {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = 
"sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"}, + {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"}, + {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"}, + {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"}, + {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"}, + {file = "protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"}, ] [[package]] name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" +category = "dev" optional = false python-versions = "*" files = [ @@ -1148,6 +1221,7 @@ files = [ name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" +category = "dev" optional = false python-versions = "*" files = [ @@ -1162,6 +1236,7 @@ tests = ["pytest"] name = "pure-sasl" version = "0.6.2" description = "Pure Python client SASL implementation" +category = "main" optional = false python-versions = "*" files = [ @@ -1176,6 +1251,7 @@ gssapi = ["kerberos (>=1.3.0)"] name = "pyasn1" version = "0.5.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -1187,6 +1263,7 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" +category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -1201,6 +1278,7 @@ pyasn1 = ">=0.4.6,<0.6.0" name = "pycparser" version = "2.21" description = "C parser in Python" +category = "main" optional 
= false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1210,47 +1288,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.13" +version = "1.10.14" description = "Data validation and settings management using python type hints" +category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"}, - {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"}, - {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"}, - {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"}, - {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"}, - {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"}, - {file = 
"pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"}, - {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"}, - {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"}, - {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"}, - {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"}, - {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"}, - {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"}, - {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"}, - {file = "pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"}, - {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"}, - {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"}, - {file = "pydantic-1.10.13.tar.gz", hash = 
"sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"}, + {file = "pydantic-1.10.14-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7f4fcec873f90537c382840f330b90f4715eebc2bc9925f04cb92de593eae054"}, + {file = "pydantic-1.10.14-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e3a76f571970fcd3c43ad982daf936ae39b3e90b8a2e96c04113a369869dc87"}, + {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d886bd3c3fbeaa963692ef6b643159ccb4b4cefaf7ff1617720cbead04fd1d"}, + {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:798a3d05ee3b71967844a1164fd5bdb8c22c6d674f26274e78b9f29d81770c4e"}, + {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:23d47a4b57a38e8652bcab15a658fdb13c785b9ce217cc3a729504ab4e1d6bc9"}, + {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9f674b5c3bebc2eba401de64f29948ae1e646ba2735f884d1594c5f675d6f2a"}, + {file = "pydantic-1.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:24a7679fab2e0eeedb5a8924fc4a694b3bcaac7d305aeeac72dd7d4e05ecbebf"}, + {file = "pydantic-1.10.14-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9d578ac4bf7fdf10ce14caba6f734c178379bd35c486c6deb6f49006e1ba78a7"}, + {file = "pydantic-1.10.14-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa7790e94c60f809c95602a26d906eba01a0abee9cc24150e4ce2189352deb1b"}, + {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad4e10efa5474ed1a611b6d7f0d130f4aafadceb73c11d9e72823e8f508e663"}, + {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1245f4f61f467cb3dfeced2b119afef3db386aec3d24a22a1de08c65038b255f"}, + {file = "pydantic-1.10.14-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:21efacc678a11114c765eb52ec0db62edffa89e9a562a94cbf8fa10b5db5c046"}, + {file = "pydantic-1.10.14-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:412ab4a3f6dbd2bf18aefa9f79c7cca23744846b31f1d6555c2ee2b05a2e14ca"}, + {file = "pydantic-1.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:e897c9f35281f7889873a3e6d6b69aa1447ceb024e8495a5f0d02ecd17742a7f"}, + {file = "pydantic-1.10.14-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d604be0f0b44d473e54fdcb12302495fe0467c56509a2f80483476f3ba92b33c"}, + {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42c7d17706911199798d4c464b352e640cab4351efe69c2267823d619a937e5"}, + {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:596f12a1085e38dbda5cbb874d0973303e34227b400b6414782bf205cc14940c"}, + {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bfb113860e9288d0886e3b9e49d9cf4a9d48b441f52ded7d96db7819028514cc"}, + {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc3ed06ab13660b565eed80887fcfbc0070f0aa0691fbb351657041d3e874efe"}, + {file = "pydantic-1.10.14-cp37-cp37m-win_amd64.whl", hash = "sha256:ad8c2bc677ae5f6dbd3cf92f2c7dc613507eafe8f71719727cbc0a7dec9a8c01"}, + {file = "pydantic-1.10.14-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c37c28449752bb1f47975d22ef2882d70513c546f8f37201e0fec3a97b816eee"}, + {file = "pydantic-1.10.14-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49a46a0994dd551ec051986806122767cf144b9702e31d47f6d493c336462597"}, + {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53e3819bd20a42470d6dd0fe7fc1c121c92247bca104ce608e609b59bc7a77ee"}, + {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbb503bbbbab0c588ed3cd21975a1d0d4163b87e360fec17a792f7d8c4ff29f"}, + {file = 
"pydantic-1.10.14-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:336709883c15c050b9c55a63d6c7ff09be883dbc17805d2b063395dd9d9d0022"}, + {file = "pydantic-1.10.14-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4ae57b4d8e3312d486e2498d42aed3ece7b51848336964e43abbf9671584e67f"}, + {file = "pydantic-1.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:dba49d52500c35cfec0b28aa8b3ea5c37c9df183ffc7210b10ff2a415c125c4a"}, + {file = "pydantic-1.10.14-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c66609e138c31cba607d8e2a7b6a5dc38979a06c900815495b2d90ce6ded35b4"}, + {file = "pydantic-1.10.14-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d986e115e0b39604b9eee3507987368ff8148222da213cd38c359f6f57b3b347"}, + {file = "pydantic-1.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:646b2b12df4295b4c3148850c85bff29ef6d0d9621a8d091e98094871a62e5c7"}, + {file = "pydantic-1.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282613a5969c47c83a8710cc8bfd1e70c9223feb76566f74683af889faadc0ea"}, + {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:466669501d08ad8eb3c4fecd991c5e793c4e0bbd62299d05111d4f827cded64f"}, + {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:13e86a19dca96373dcf3190fcb8797d40a6f12f154a244a8d1e8e03b8f280593"}, + {file = "pydantic-1.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:08b6ec0917c30861e3fe71a93be1648a2aa4f62f866142ba21670b24444d7fd8"}, + {file = "pydantic-1.10.14-py3-none-any.whl", hash = "sha256:8ee853cd12ac2ddbf0ecbac1c289f95882b2d4482258048079d13be700aa114c"}, + {file = "pydantic-1.10.14.tar.gz", hash = "sha256:46f17b832fe27de7850896f3afee50ea682220dd218f7e9c88d436788419dca6"}, ] [package.dependencies] @@ -1264,6 +1343,7 @@ email = ["email-validator (>=1.0.3)"] name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." 
+category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1279,6 +1359,7 @@ windows-terminal = ["colorama (>=0.4.6)"] name = "pymacaroons" version = "0.13.0" description = "Macaroon library for Python" +category = "dev" optional = false python-versions = "*" files = [ @@ -1294,6 +1375,7 @@ six = ">=1.8.0" name = "pynacl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1320,6 +1402,7 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] name = "pyrfc3339" version = "1.1" description = "Generate and parse RFC 3339 timestamps" +category = "dev" optional = false python-versions = "*" files = [ @@ -1332,13 +1415,14 @@ pytz = "*" [[package]] name = "pyright" -version = "1.1.339" +version = "1.1.348" description = "Command line wrapper for pyright" +category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.339-py3-none-any.whl", hash = "sha256:662f3df170fdeda76fd21b158ab20c518dad99c2f14b0a7f84c2bfd60d5a8d2a"}, - {file = "pyright-1.1.339.tar.gz", hash = "sha256:581ce4e281575814380dd67a331e75c0ccdca31eb848005ee1ae46e7bfa8b4f9"}, + {file = "pyright-1.1.348-py3-none-any.whl", hash = "sha256:e7d4df504c4c082b5c3725a8c15fc3fda62da5d09fc77994baa77f359a1b62f2"}, + {file = "pyright-1.1.348.tar.gz", hash = "sha256:1c6994546f7ab130b9da8c357f8b2a99bef268b6d8ae2eae292bde66923aa7af"}, ] [package.dependencies] @@ -1350,13 +1434,14 @@ dev = ["twine (>=3.4.1)"] [[package]] name = "pytest" -version = "7.4.3" +version = "7.4.4" description = "pytest: simple powerful testing with Python" +category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, - {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, + {file = 
"pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, ] [package.dependencies] @@ -1374,6 +1459,7 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "pytest-asyncio" version = "0.21.1" description = "Pytest support for asyncio" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1388,15 +1474,34 @@ pytest = ">=7.0.0" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "pytest-operator" -version = "0.31.1" +version = "0.32.0" description = "Fixtures for Operators" +category = "dev" optional = false python-versions = "*" files = [ - {file = "pytest-operator-0.31.1.tar.gz", hash = "sha256:83a5c5a2ec781eeea97e678b777cf3469e4502fc07ff8b1b8b0f9b160cff4d32"}, - {file = "pytest_operator-0.31.1-py3-none-any.whl", hash = "sha256:103fd6a2dfe66c7d8f317359ca0f10bd9b3a8483581d9351e5e96aff1d4e8349"}, + {file = "pytest-operator-0.32.0.tar.gz", hash = "sha256:9e7b3b1384118110654f86bb6aaf772d29c6f38aec05492707ad09beff7b645b"}, + {file = "pytest_operator-0.32.0-py3-none-any.whl", hash = 
"sha256:a03efd6e3aea5f5c7395ef64c45d6d1719fde61f8593804dc5c8ffff561ecfd4"}, ] [package.dependencies] @@ -1411,6 +1516,7 @@ pyyaml = "*" name = "pytest-operator-cache" version = "0.1.0" description = "" +category = "dev" optional = false python-versions = "^3.8" files = [] @@ -1422,14 +1528,14 @@ pyyaml = "*" [package.source] type = "git" url = "https://github.com/canonical/data-platform-workflows" -reference = "v6.1.1" -resolved_reference = "c9bf1f5fb128800ed4f0a317509884f75c63902b" +reference = "v7" +resolved_reference = "9ab9063d00a9b80b87b30f8d6f33a71b92702574" subdirectory = "python/pytest_plugins/pytest_operator_cache" - [[package]] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" +category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -1444,6 +1550,7 @@ six = ">=1.5" name = "pytz" version = "2023.3.post1" description = "World timezone definitions, modern and historical" +category = "dev" optional = false python-versions = "*" files = [ @@ -1455,6 +1562,7 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" +category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1463,6 +1571,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = 
"sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1470,8 +1579,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = 
"sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1488,6 +1604,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1495,6 +1612,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = 
"PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1502,13 +1620,14 @@ files = [ [[package]] name = "referencing" -version = "0.31.1" +version = "0.32.1" description = "JSON Referencing + Python" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.31.1-py3-none-any.whl", hash = "sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d"}, - {file = "referencing-0.31.1.tar.gz", hash = "sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec"}, + {file = "referencing-0.32.1-py3-none-any.whl", hash = "sha256:7e4dc12271d8e15612bfe35792f5ea1c40970dadf8624602e33db2758f7ee554"}, + {file = "referencing-0.32.1.tar.gz", hash = "sha256:3c57da0513e9563eb7e203ebe9bb3a1b509b042016433bd1e45a2853466c3dd3"}, ] [package.dependencies] @@ -1519,6 +1638,7 @@ rpds-py = ">=0.7.0" name = "requests" version = "2.31.0" description = "Python HTTP for Humans." +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1540,6 +1660,7 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." 
+category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1556,116 +1677,118 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] [[package]] name = "rpds-py" -version = "0.13.2" +version = "0.17.1" description = "Python bindings to Rust's persistent data structures (rpds)" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.13.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1ceebd0ae4f3e9b2b6b553b51971921853ae4eebf3f54086be0565d59291e53d"}, - {file = "rpds_py-0.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:46e1ed994a0920f350a4547a38471217eb86f57377e9314fbaaa329b71b7dfe3"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee353bb51f648924926ed05e0122b6a0b1ae709396a80eb583449d5d477fcdf7"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:530190eb0cd778363bbb7596612ded0bb9fef662daa98e9d92a0419ab27ae914"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d311e44dd16d2434d5506d57ef4d7036544fc3c25c14b6992ef41f541b10fb"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e72f750048b32d39e87fc85c225c50b2a6715034848dbb196bf3348aa761fa1"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db09b98c7540df69d4b47218da3fbd7cb466db0fb932e971c321f1c76f155266"}, - {file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ac26f50736324beb0282c819668328d53fc38543fa61eeea2c32ea8ea6eab8d"}, - {file = "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12ecf89bd54734c3c2c79898ae2021dca42750c7bcfb67f8fb3315453738ac8f"}, - {file = "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a44c8440183b43167fd1a0819e8356692bf5db1ad14ce140dbd40a1485f2dea"}, - {file 
= "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcef4f2d3dc603150421de85c916da19471f24d838c3c62a4f04c1eb511642c1"}, - {file = "rpds_py-0.13.2-cp310-none-win32.whl", hash = "sha256:ee6faebb265e28920a6f23a7d4c362414b3f4bb30607141d718b991669e49ddc"}, - {file = "rpds_py-0.13.2-cp310-none-win_amd64.whl", hash = "sha256:ac96d67b37f28e4b6ecf507c3405f52a40658c0a806dffde624a8fcb0314d5fd"}, - {file = "rpds_py-0.13.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:b5f6328e8e2ae8238fc767703ab7b95785521c42bb2b8790984e3477d7fa71ad"}, - {file = "rpds_py-0.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:729408136ef8d45a28ee9a7411917c9e3459cf266c7e23c2f7d4bb8ef9e0da42"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65cfed9c807c27dee76407e8bb29e6f4e391e436774bcc769a037ff25ad8646e"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aefbdc934115d2f9278f153952003ac52cd2650e7313750390b334518c589568"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48db29bd47814671afdd76c7652aefacc25cf96aad6daefa82d738ee87461e2"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c55d7f2d817183d43220738270efd3ce4e7a7b7cbdaefa6d551ed3d6ed89190"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6aadae3042f8e6db3376d9e91f194c606c9a45273c170621d46128f35aef7cd0"}, - {file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5feae2f9aa7270e2c071f488fab256d768e88e01b958f123a690f1cc3061a09c"}, - {file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:51967a67ea0d7b9b5cd86036878e2d82c0b6183616961c26d825b8c994d4f2c8"}, - {file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:4d0c10d803549427f427085ed7aebc39832f6e818a011dcd8785e9c6a1ba9b3e"}, - {file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:603d5868f7419081d616dab7ac3cfa285296735e7350f7b1e4f548f6f953ee7d"}, - {file = "rpds_py-0.13.2-cp311-none-win32.whl", hash = "sha256:b8996ffb60c69f677245f5abdbcc623e9442bcc91ed81b6cd6187129ad1fa3e7"}, - {file = "rpds_py-0.13.2-cp311-none-win_amd64.whl", hash = "sha256:5379e49d7e80dca9811b36894493d1c1ecb4c57de05c36f5d0dd09982af20211"}, - {file = "rpds_py-0.13.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8a776a29b77fe0cc28fedfd87277b0d0f7aa930174b7e504d764e0b43a05f381"}, - {file = "rpds_py-0.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a1472956c5bcc49fb0252b965239bffe801acc9394f8b7c1014ae9258e4572b"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f252dfb4852a527987a9156cbcae3022a30f86c9d26f4f17b8c967d7580d65d2"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f0d320e70b6b2300ff6029e234e79fe44e9dbbfc7b98597ba28e054bd6606a57"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ade2ccb937060c299ab0dfb2dea3d2ddf7e098ed63ee3d651ebfc2c8d1e8632a"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9d121be0217787a7d59a5c6195b0842d3f701007333426e5154bf72346aa658"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fa6bd071ec6d90f6e7baa66ae25820d57a8ab1b0a3c6d3edf1834d4b26fafa2"}, - {file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c918621ee0a3d1fe61c313f2489464f2ae3d13633e60f520a8002a5e910982ee"}, - {file = "rpds_py-0.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:25b28b3d33ec0a78e944aaaed7e5e2a94ac811bcd68b557ca48a0c30f87497d2"}, - {file = 
"rpds_py-0.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:31e220a040b89a01505128c2f8a59ee74732f666439a03e65ccbf3824cdddae7"}, - {file = "rpds_py-0.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:15253fff410873ebf3cfba1cc686a37711efcd9b8cb30ea21bb14a973e393f60"}, - {file = "rpds_py-0.13.2-cp312-none-win32.whl", hash = "sha256:b981a370f8f41c4024c170b42fbe9e691ae2dbc19d1d99151a69e2c84a0d194d"}, - {file = "rpds_py-0.13.2-cp312-none-win_amd64.whl", hash = "sha256:4c4e314d36d4f31236a545696a480aa04ea170a0b021e9a59ab1ed94d4c3ef27"}, - {file = "rpds_py-0.13.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:80e5acb81cb49fd9f2d5c08f8b74ffff14ee73b10ca88297ab4619e946bcb1e1"}, - {file = "rpds_py-0.13.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:efe093acc43e869348f6f2224df7f452eab63a2c60a6c6cd6b50fd35c4e075ba"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c2a61c0e4811012b0ba9f6cdcb4437865df5d29eab5d6018ba13cee1c3064a0"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:751758d9dd04d548ec679224cc00e3591f5ebf1ff159ed0d4aba6a0746352452"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ba8858933f0c1a979781272a5f65646fca8c18c93c99c6ddb5513ad96fa54b1"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bfdfbe6a36bc3059fff845d64c42f2644cf875c65f5005db54f90cdfdf1df815"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0379c1935c44053c98826bc99ac95f3a5355675a297ac9ce0dfad0ce2d50ca"}, - {file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5593855b5b2b73dd8413c3fdfa5d95b99d657658f947ba2c4318591e745d083"}, - {file = "rpds_py-0.13.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2a7bef6977043673750a88da064fd513f89505111014b4e00fbdd13329cd4e9a"}, - {file = 
"rpds_py-0.13.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:3ab96754d23372009638a402a1ed12a27711598dd49d8316a22597141962fe66"}, - {file = "rpds_py-0.13.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e06cfea0ece444571d24c18ed465bc93afb8c8d8d74422eb7026662f3d3f779b"}, - {file = "rpds_py-0.13.2-cp38-none-win32.whl", hash = "sha256:5493569f861fb7b05af6d048d00d773c6162415ae521b7010197c98810a14cab"}, - {file = "rpds_py-0.13.2-cp38-none-win_amd64.whl", hash = "sha256:b07501b720cf060c5856f7b5626e75b8e353b5f98b9b354a21eb4bfa47e421b1"}, - {file = "rpds_py-0.13.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:881df98f0a8404d32b6de0fd33e91c1b90ed1516a80d4d6dc69d414b8850474c"}, - {file = "rpds_py-0.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d79c159adea0f1f4617f54aa156568ac69968f9ef4d1e5fefffc0a180830308e"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38d4f822ee2f338febcc85aaa2547eb5ba31ba6ff68d10b8ec988929d23bb6b4"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5d75d6d220d55cdced2f32cc22f599475dbe881229aeddba6c79c2e9df35a2b3"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d97e9ae94fb96df1ee3cb09ca376c34e8a122f36927230f4c8a97f469994bff"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67a429520e97621a763cf9b3ba27574779c4e96e49a27ff8a1aa99ee70beb28a"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:188435794405c7f0573311747c85a96b63c954a5f2111b1df8018979eca0f2f0"}, - {file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:38f9bf2ad754b4a45b8210a6c732fe876b8a14e14d5992a8c4b7c1ef78740f53"}, - {file = "rpds_py-0.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a6ba2cb7d676e9415b9e9ac7e2aae401dc1b1e666943d1f7bc66223d3d73467b"}, - {file = 
"rpds_py-0.13.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:eaffbd8814bb1b5dc3ea156a4c5928081ba50419f9175f4fc95269e040eff8f0"}, - {file = "rpds_py-0.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a4c1058cdae6237d97af272b326e5f78ee7ee3bbffa6b24b09db4d828810468"}, - {file = "rpds_py-0.13.2-cp39-none-win32.whl", hash = "sha256:b5267feb19070bef34b8dea27e2b504ebd9d31748e3ecacb3a4101da6fcb255c"}, - {file = "rpds_py-0.13.2-cp39-none-win_amd64.whl", hash = "sha256:ddf23960cb42b69bce13045d5bc66f18c7d53774c66c13f24cf1b9c144ba3141"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:97163a1ab265a1073a6372eca9f4eeb9f8c6327457a0b22ddfc4a17dcd613e74"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:25ea41635d22b2eb6326f58e608550e55d01df51b8a580ea7e75396bafbb28e9"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d59d4d451ba77f08cb4cd9268dec07be5bc65f73666302dbb5061989b17198"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7c564c58cf8f248fe859a4f0fe501b050663f3d7fbc342172f259124fb59933"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61dbc1e01dc0c5875da2f7ae36d6e918dc1b8d2ce04e871793976594aad8a57a"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdb82eb60d31b0c033a8e8ee9f3fc7dfbaa042211131c29da29aea8531b4f18f"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d204957169f0b3511fb95395a9da7d4490fb361763a9f8b32b345a7fe119cb45"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c45008ca79bad237cbc03c72bc5205e8c6f66403773929b1b50f7d84ef9e4d07"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:79bf58c08f0756adba691d480b5a20e4ad23f33e1ae121584cf3a21717c36dfa"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e86593bf8637659e6a6ed58854b6c87ec4e9e45ee8a4adfd936831cef55c2d21"}, - {file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d329896c40d9e1e5c7715c98529e4a188a1f2df51212fd65102b32465612b5dc"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4a5375c5fff13f209527cd886dc75394f040c7d1ecad0a2cb0627f13ebe78a12"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:06d218e4464d31301e943b65b2c6919318ea6f69703a351961e1baaf60347276"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f41d32a2ddc5a94df4b829b395916a4b7f103350fa76ba6de625fcb9e773ac"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6bc568b05e02cd612be53900c88aaa55012e744930ba2eeb56279db4c6676eb3"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d94d78418203904730585efa71002286ac4c8ac0689d0eb61e3c465f9e608ff"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bed0252c85e21cf73d2d033643c945b460d6a02fc4a7d644e3b2d6f5f2956c64"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244e173bb6d8f3b2f0c4d7370a1aa341f35da3e57ffd1798e5b2917b91731fd3"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7f55cd9cf1564b7b03f238e4c017ca4794c05b01a783e9291065cb2858d86ce4"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:f03a1b3a4c03e3e0161642ac5367f08479ab29972ea0ffcd4fa18f729cd2be0a"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:f5f4424cb87a20b016bfdc157ff48757b89d2cc426256961643d443c6c277007"}, - {file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c82bbf7e03748417c3a88c1b0b291288ce3e4887a795a3addaa7a1cfd9e7153e"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c0095b8aa3e432e32d372e9a7737e65b58d5ed23b9620fea7cb81f17672f1fa1"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4c2d26aa03d877c9730bf005621c92da263523a1e99247590abbbe252ccb7824"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96f2975fb14f39c5fe75203f33dd3010fe37d1c4e33177feef1107b5ced750e3"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4dcc5ee1d0275cb78d443fdebd0241e58772a354a6d518b1d7af1580bbd2c4e8"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61d42d2b08430854485135504f672c14d4fc644dd243a9c17e7c4e0faf5ed07e"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d3a61e928feddc458a55110f42f626a2a20bea942ccedb6fb4cee70b4830ed41"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7de12b69d95072394998c622cfd7e8cea8f560db5fca6a62a148f902a1029f8b"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87a90f5545fd61f6964e65eebde4dc3fa8660bb7d87adb01d4cf17e0a2b484ad"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:9c95a1a290f9acf7a8f2ebbdd183e99215d491beea52d61aa2a7a7d2c618ddc6"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:35f53c76a712e323c779ca39b9a81b13f219a8e3bc15f106ed1e1462d56fcfe9"}, - {file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:96fb0899bb2ab353f42e5374c8f0789f54e0a94ef2f02b9ac7149c56622eaf31"}, - {file = "rpds_py-0.13.2.tar.gz", hash = "sha256:f8eae66a1304de7368932b42d801c67969fd090ddb1a7a24f27b435ed4bed68f"}, + {file = "rpds_py-0.17.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d"}, + {file = "rpds_py-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59"}, + {file = "rpds_py-0.17.1-cp310-none-win32.whl", hash = 
"sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d"}, + {file = "rpds_py-0.17.1-cp310-none-win_amd64.whl", hash = "sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6"}, + {file = "rpds_py-0.17.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b"}, + {file = "rpds_py-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea"}, + {file = 
"rpds_py-0.17.1-cp311-none-win32.whl", hash = "sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518"}, + {file = "rpds_py-0.17.1-cp311-none-win_amd64.whl", hash = "sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf"}, + {file = "rpds_py-0.17.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf"}, + {file = "rpds_py-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23"}, + {file = "rpds_py-0.17.1-cp312-none-win32.whl", hash = "sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1"}, + {file = "rpds_py-0.17.1-cp312-none-win_amd64.whl", hash = "sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3"}, + {file = "rpds_py-0.17.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d"}, + {file = "rpds_py-0.17.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6"}, + {file = "rpds_py-0.17.1-cp38-none-win32.whl", hash = "sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a"}, + {file = "rpds_py-0.17.1-cp38-none-win_amd64.whl", hash = "sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb"}, + {file = "rpds_py-0.17.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a"}, + {file = "rpds_py-0.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b"}, + {file = "rpds_py-0.17.1-cp39-none-win32.whl", hash = "sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f"}, + {file = "rpds_py-0.17.1-cp39-none-win_amd64.whl", hash = "sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68"}, + {file = "rpds_py-0.17.1.tar.gz", hash = "sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7"}, ] 
[[package]] name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" +category = "dev" optional = false python-versions = ">=3.6,<4" files = [ @@ -1678,39 +1801,41 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.1.7" +version = "0.1.14" description = "An extremely fast Python linter and code formatter, written in Rust." +category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac"}, - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce"}, - {file = "ruff-0.1.7-py3-none-win32.whl", hash = "sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80"}, - {file = "ruff-0.1.7-py3-none-win_amd64.whl", hash = "sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96"}, - {file = "ruff-0.1.7-py3-none-win_arm64.whl", hash = "sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e"}, - {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, + {file = "ruff-0.1.14-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:96f76536df9b26622755c12ed8680f159817be2f725c17ed9305b472a757cdbb"}, + {file = "ruff-0.1.14-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ab3f71f64498c7241123bb5a768544cf42821d2a537f894b22457a543d3ca7a9"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7060156ecc572b8f984fd20fd8b0fcb692dd5d837b7606e968334ab7ff0090ab"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a53d8e35313d7b67eb3db15a66c08434809107659226a90dcd7acb2afa55faea"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bea9be712b8f5b4ebed40e1949379cfb2a7d907f42921cf9ab3aae07e6fba9eb"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2270504d629a0b064247983cbc495bed277f372fb9eaba41e5cf51f7ba705a6a"}, + {file = 
"ruff-0.1.14-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80258bb3b8909b1700610dfabef7876423eed1bc930fe177c71c414921898efa"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:653230dd00aaf449eb5ff25d10a6e03bc3006813e2cb99799e568f55482e5cae"}, + {file = "ruff-0.1.14-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b3acc6c4e6928459ba9eb7459dd4f0c4bf266a053c863d72a44c33246bfdbf"}, + {file = "ruff-0.1.14-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6b3dadc9522d0eccc060699a9816e8127b27addbb4697fc0c08611e4e6aeb8b5"}, + {file = "ruff-0.1.14-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1c8eca1a47b4150dc0fbec7fe68fc91c695aed798532a18dbb1424e61e9b721f"}, + {file = "ruff-0.1.14-py3-none-musllinux_1_2_i686.whl", hash = "sha256:62ce2ae46303ee896fc6811f63d6dabf8d9c389da0f3e3f2bce8bc7f15ef5488"}, + {file = "ruff-0.1.14-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b2027dde79d217b211d725fc833e8965dc90a16d0d3213f1298f97465956661b"}, + {file = "ruff-0.1.14-py3-none-win32.whl", hash = "sha256:722bafc299145575a63bbd6b5069cb643eaa62546a5b6398f82b3e4403329cab"}, + {file = "ruff-0.1.14-py3-none-win_amd64.whl", hash = "sha256:e3d241aa61f92b0805a7082bd89a9990826448e4d0398f0e2bc8f05c75c63d99"}, + {file = "ruff-0.1.14-py3-none-win_arm64.whl", hash = "sha256:269302b31ade4cde6cf6f9dd58ea593773a37ed3f7b97e793c8594b262466b67"}, + {file = "ruff-0.1.14.tar.gz", hash = "sha256:ad3f8088b2dfd884820289a06ab718cde7d38b94972212cc4ba90d5fbc9955f3"}, ] [[package]] name = "setuptools" -version = "69.0.2" +version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = 
"sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, + {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, + {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, ] [package.extras] @@ -1722,6 +1847,7 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1733,6 +1859,7 @@ files = [ name = "stack-data" version = "0.6.3" description = "Extract data from python stack frames and tracebacks for informative displays" +category = "dev" optional = false python-versions = "*" files = [ @@ -1752,6 +1879,7 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1766,6 +1894,7 @@ doc = ["reno", "sphinx", "tornado (>=4.5)"] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1777,6 +1906,7 @@ files = [ name = "toposort" version = "1.10" description = "Implements a topological sort algorithm." 
+category = "dev" optional = false python-versions = "*" files = [ @@ -1786,13 +1916,14 @@ files = [ [[package]] name = "traitlets" -version = "5.14.0" +version = "5.14.1" description = "Traitlets Python configuration system" +category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "traitlets-5.14.0-py3-none-any.whl", hash = "sha256:f14949d23829023013c47df20b4a76ccd1a85effb786dc060f34de7948361b33"}, - {file = "traitlets-5.14.0.tar.gz", hash = "sha256:fcdaa8ac49c04dfa0ed3ee3384ef6dfdb5d6f3741502be247279407679296772"}, + {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, + {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, ] [package.extras] @@ -1801,19 +1932,21 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.9.0" description = "Backported and Experimental Type Hints for Python 3.8+" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, ] [[package]] name = "typing-inspect" version = "0.9.0" description = "Runtime inspection utilities for typing module." 
+category = "dev" optional = false python-versions = "*" files = [ @@ -1827,35 +1960,38 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "1.26.18" +version = "2.1.0" description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.8" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, ] [package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "wcwidth" -version = "0.2.12" +version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" +category = "dev" optional = false python-versions = "*" files = [ - {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"}, - {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"}, + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = 
"sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] [[package]] name = "websocket-client" version = "1.7.0" description = "WebSocket client for Python with low level API options" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1872,6 +2008,7 @@ test = ["websockets"] name = "websockets" version = "8.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +category = "dev" optional = false python-versions = ">=3.6.1" files = [ @@ -1903,6 +2040,7 @@ files = [ name = "websockets" version = "9.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +category = "dev" optional = false python-versions = ">=3.6.1" files = [ @@ -1945,6 +2083,7 @@ files = [ name = "websockets" version = "12.0" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2026,6 +2165,7 @@ files = [ name = "zipp" version = "3.17.0" description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2040,4 +2180,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.8,<4.0" -content-hash = "f15c1204183c5fa2cde9e0529216a463025859d27fa901c43ab18c38eda81c97" +content-hash = "5fff1dbbb7524500356f7d2e5ce519aa704700e9f4ef05763f69567f1c88237c" diff --git a/pyproject.toml b/pyproject.toml index 181458cb..26e9357f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ cosl = ">= 0.0.5" tenacity = ">=8.0.1" pure-sasl = ">=0.6.2" jsonschema = ">=4.10" -cryptography = "^39.0.0" +cryptography = ">39.0.0" pydantic ="^1.10.7" pyyaml = "^6.0.1" @@ -67,6 +67,7 @@ optional = true [tool.poetry.group.unit.dependencies] pytest = ">=7.2" coverage = {extras = ["toml"], version = ">7.0"} +pytest-mock = "^3.11.1" [tool.poetry.group.integration] 
optional = true @@ -82,9 +83,9 @@ pure-sasl = ">=0.5" kafka-python = ">=2.0" requests = ">2.25" -pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v6.1.1", subdirectory = "python/pytest_plugins/pytest_operator_cache"} +pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v7", subdirectory = "python/pytest_plugins/pytest_operator_cache"} # To be enabled if we are using groups on integration tests -# pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v6.1.1", subdirectory = "python/pytest_plugins/pytest_operator_groups"} +# pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v7", subdirectory = "python/pytest_plugins/pytest_operator_groups"} [tool.poetry.group.format.dependencies] diff --git a/src/charm.py b/src/charm.py index 07a0aa98..ddda24f6 100755 --- a/src/charm.py +++ b/src/charm.py @@ -5,58 +5,45 @@ """Charmed Machine Operator for Apache Kafka.""" import logging -import subprocess import time -from typing import MutableMapping, Optional from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_agent.v0.cos_agent import COSAgentProvider from charms.operator_libs_linux.v0 import sysctl from charms.operator_libs_linux.v1.snap import SnapError from charms.rolling_ops.v0.rollingops import RollingOpsManager, RunWithLock -from ops.charm import ( - ActionEvent, - RelationChangedEvent, - RelationCreatedEvent, - RelationEvent, - StorageAttachedEvent, - StorageDetachingEvent, - StorageEvent, -) +from ops.charm import StorageAttachedEvent, StorageDetachingEvent, StorageEvent from ops.framework import EventBase from ops.main import main -from ops.model import ActiveStatus, Relation, StatusBase - -from auth import KafkaAuth -from config import KafkaConfig +from ops.model import ActiveStatus, StatusBase + +from core.cluster import ClusterState +from core.structured_config 
import CharmConfig +from events.password_actions import PasswordActionEvents +from events.provider import KafkaProvider +from events.tls import TLSHandler +from events.upgrade import KafkaDependencyModel, KafkaUpgrade +from events.zookeeper import ZooKeeperHandler from health import KafkaHealth from literals import ( - ADMIN_USER, CHARM_KEY, DEPENDENCIES, - INTERNAL_USERS, + GROUP, JMX_EXPORTER_PORT, LOGS_RULES_DIR, METRICS_RULES_DIR, OS_REQUIREMENTS, PEER, REL_NAME, - ZK, + USER, DebugLevel, Status, + Substrate, ) -from provider import KafkaProvider -from snap import KafkaSnap -from structured_config import CharmConfig -from tls import KafkaTLS -from upgrade import KafkaDependencyModel, KafkaUpgrade -from utils import ( - broker_active, - generate_password, - safe_get_file, - set_snap_mode_bits, - set_snap_ownership, -) +from managers.auth import AuthManager +from managers.config import KafkaConfigManager +from managers.tls import TLSManager +from workload import KafkaWorkload logger = logging.getLogger(__name__) @@ -69,45 +56,37 @@ class KafkaCharm(TypedCharmBase[CharmConfig]): def __init__(self, *args): super().__init__(*args) self.name = CHARM_KEY - self.snap = KafkaSnap() - self.kafka_config = KafkaConfig(self) - self.sysctl_config = sysctl.Config(name=CHARM_KEY) - self.tls = KafkaTLS(self) - self.provider = KafkaProvider(self) + self.substrate: Substrate = "vm" + self.workload = KafkaWorkload() + self.state = ClusterState(self, substrate=self.substrate) + self.health = KafkaHealth(self) - self.restart = RollingOpsManager(self, relation="restart", callback=self._restart) + + # HANDLERS + + self.password_action_events = PasswordActionEvents(self) + self.zookeeper = ZooKeeperHandler(self) + self.tls = TLSHandler(self) + self.provider = KafkaProvider(self) self.upgrade = KafkaUpgrade( self, dependency_model=KafkaDependencyModel( - **DEPENDENCIES # pyright: ignore[reportGeneralTypeIssues] + **DEPENDENCIES # pyright: ignore[reportGeneralTypeIssues, 
reportArgumentType] ), ) - self.framework.observe(getattr(self.on, "start"), self._on_start) - self.framework.observe(getattr(self.on, "install"), self._on_install) - self.framework.observe(getattr(self.on, "config_changed"), self._on_config_changed) - self.framework.observe(getattr(self.on, "update_status"), self._on_update_status) - self.framework.observe(getattr(self.on, "remove"), self._on_remove) - - self.framework.observe(self.on[PEER].relation_changed, self._on_config_changed) - - self.framework.observe(self.on[ZK].relation_created, self._on_zookeeper_created) - self.framework.observe(self.on[ZK].relation_joined, self._on_zookeeper_changed) - self.framework.observe(self.on[ZK].relation_changed, self._on_zookeeper_changed) - self.framework.observe(self.on[ZK].relation_broken, self._on_zookeeper_broken) + # MANAGERS - self.framework.observe(getattr(self.on, "set_password_action"), self._set_password_action) - self.framework.observe( - getattr(self.on, "get_admin_credentials_action"), self._get_admin_credentials_action + self.config_manager = KafkaConfigManager( + self.state, self.workload, self.config, current_version=self.upgrade.current_version ) + self.tls_manager = TLSManager(self.state, self.workload, substrate=self.substrate) + self.auth_manager = AuthManager(self.state, self.workload, self.config_manager.kafka_opts) - self.framework.observe( - getattr(self.on, "data_storage_attached"), self._on_storage_attached - ) - self.framework.observe( - getattr(self.on, "data_storage_detaching"), self._on_storage_detaching - ) + # LIB HANDLERS + self.sysctl_config = sysctl.Config(name=CHARM_KEY) + self.restart = RollingOpsManager(self, relation="restart", callback=self._restart) self._grafana_agent = COSAgentProvider( self, metrics_endpoints=[ @@ -117,98 +96,123 @@ def __init__(self, *args): ], metrics_rules_dir=METRICS_RULES_DIR, logs_rules_dir=LOGS_RULES_DIR, - log_slots=[f"{self.snap.SNAP_NAME}:{self.snap.LOG_SLOT}"], + 
log_slots=[f"{self.workload.SNAP_NAME}:{self.workload.LOG_SLOT}"], ) - @property - def peer_relation(self) -> Optional[Relation]: - """The cluster peer relation.""" - return self.model.get_relation(PEER) + self.framework.observe(getattr(self.on, "install"), self._on_install) + self.framework.observe(getattr(self.on, "start"), self._on_start) + self.framework.observe(getattr(self.on, "config_changed"), self._on_config_changed) + self.framework.observe(getattr(self.on, "update_status"), self._on_update_status) + self.framework.observe(getattr(self.on, "remove"), self._on_remove) - @property - def app_peer_data(self) -> MutableMapping[str, str]: - """Application peer relation data object.""" - if not self.peer_relation: - return {} + self.framework.observe(self.on[PEER].relation_changed, self._on_config_changed) - return self.peer_relation.data[self.app] + self.framework.observe( + getattr(self.on, "data_storage_attached"), self._on_storage_attached + ) + self.framework.observe( + getattr(self.on, "data_storage_detaching"), self._on_storage_detaching + ) - @property - def unit_peer_data(self) -> MutableMapping[str, str]: - """Unit peer relation data object.""" - if not self.peer_relation: - return {} + def _on_install(self, _) -> None: + """Handler for `install` event.""" + if self.workload.install(): + self._set_os_config() + self.config_manager.set_environment() + else: + self._set_status(Status.SNAP_NOT_INSTALLED) - return self.peer_relation.data[self.unit] + def _on_start(self, event: EventBase) -> None: + """Handler for `start` event.""" + self._set_status(self.state.ready_to_start) + if not isinstance(self.unit.status, ActiveStatus): + event.defer() + return - @property - def unit_host(self) -> str: - """Return the own host.""" - return self.unit_peer_data.get("private-address", "") + # required settings given zookeeper connection config has been created + self.config_manager.set_zk_jaas_config() + self.config_manager.set_server_properties() + 
self.config_manager.set_client_properties() - @property - def ready_to_start(self) -> bool: - """Check for active ZooKeeper relation and adding of inter-broker auth username. + # start kafka service + self.workload.start() + logger.info("Kafka snap started") - Returns: - True if ZK is related and `sync` user has been added. False otherwise. - """ - if not self.peer_relation: - self._set_status(Status.NO_PEER_RELATION) - return False + # check for connection + self._on_update_status(event) - if not self.kafka_config.zookeeper_related: - self._set_status(Status.ZK_NOT_RELATED) - return False + # only log once on successful 'on-start' run + if isinstance(self.unit.status, ActiveStatus): + logger.info(f'Broker {self.unit.name.split("/")[1]} connected') - if not self.kafka_config.zookeeper_connected: - self._set_status(Status.ZK_NO_DATA) - return False + def _on_config_changed(self, event: EventBase) -> None: + """Generic handler for most `config_changed` events across relations.""" + # only overwrite properties if service is already active + if not self.healthy or not self.upgrade.idle: + event.defer() + return - # TLS must be enabled for Kafka and ZK or disabled for both - if self.tls.enabled ^ ( - self.kafka_config.zookeeper_config.get("tls", "disabled") == "enabled" - ): - self._set_status(Status.ZK_TLS_MISMATCH) - return False + # Load current properties set in the charm workload + properties = self.workload.read(self.workload.paths.server_properties) + properties_changed = set(properties) ^ set(self.config_manager.server_properties) - if not self.kafka_config.internal_user_credentials: - self._set_status(Status.NO_BROKER_CREDS) - return False + zk_jaas = self.workload.read(self.workload.paths.zk_jaas) + zk_jaas_changed = set(zk_jaas) ^ set(self.config_manager.zk_jaas_config.splitlines()) - return True + if not properties or not zk_jaas: + # Event fired before charm has properly started + event.defer() + return - @property - def healthy(self) -> bool: - """Checks 
and updates various charm lifecycle states. + # update environment + self.config_manager.set_environment() - Is slow to fail due to retries, to be used sparingly. + if zk_jaas_changed: + clean_broker_jaas = [conf.strip() for conf in zk_jaas] + clean_config_jaas = [ + conf.strip() for conf in self.config_manager.zk_jaas_config.splitlines() + ] + logger.info( + ( + f'Broker {self.unit.name.split("/")[1]} updating JAAS config - ' + f"OLD JAAS = {set(clean_broker_jaas) - set(clean_config_jaas)}, " + f"NEW JAAS = {set(clean_config_jaas) - set(clean_broker_jaas)}" + ) + ) + self.config_manager.set_zk_jaas_config() - Returns: - True if service is alive and active. Otherwise False - """ - if not self.ready_to_start: - return False + if properties_changed: + logger.info( + ( + f'Broker {self.unit.name.split("/")[1]} updating config - ' + f"OLD PROPERTIES = {set(properties) - set(self.config_manager.server_properties)}, " + f"NEW PROPERTIES = {set(self.config_manager.server_properties) - set(properties)}" + ) + ) + self.config_manager.set_server_properties() - if not self.snap.active(): - self._set_status(Status.SNAP_NOT_RUNNING) - return False + if zk_jaas_changed or properties_changed: + if isinstance(event, StorageEvent): # to get new storages + self.on[f"{self.restart.name}"].acquire_lock.emit( + callback_override="_disable_enable_restart" + ) + else: + logger.info("Acquiring lock from _on_config_changed...") + self.on[f"{self.restart.name}"].acquire_lock.emit() - return True + # update client_properties whenever possible + self.config_manager.set_client_properties() - def _on_remove(self, _) -> None: - """Handler for stop.""" - self.sysctl_config.remove() + # If Kafka is related to client charms, update their information. 
+ if self.model.relations.get(REL_NAME, None) and self.unit.is_leader(): + self.provider.update_connection_info() def _on_update_status(self, event: EventBase) -> None: """Handler for `update-status` events.""" if not self.healthy or not self.upgrade.idle: return - if not broker_active( - unit=self.unit, - zookeeper_config=self.kafka_config.zookeeper_config, - ): + if not self.state.zookeeper.broker_active(): self._set_status(Status.ZK_NOT_CONNECTED) return @@ -227,151 +231,31 @@ def _on_update_status(self, event: EventBase) -> None: self._set_status(Status.ACTIVE) + def _on_remove(self, _) -> None: + """Handler for stop.""" + self.sysctl_config.remove() + def _on_storage_attached(self, event: StorageAttachedEvent) -> None: """Handler for `storage_attached` events.""" # new dirs won't be used until topic partitions are assigned to it # either automatically for new topics, or manually for existing # set status only for running services, not on startup - if self.snap.active(): + if self.workload.active(): self._set_status(Status.ADDED_STORAGE) - set_snap_ownership(path=self.snap.DATA_PATH) - set_snap_mode_bits(path=self.snap.DATA_PATH) + self.workload.exec(f"chown -R {USER}:{GROUP} {self.workload.paths.data_path}") + self.workload.exec(f"chmod -R 770 {self.workload.paths.data_path}") self._on_config_changed(event) def _on_storage_detaching(self, event: StorageDetachingEvent) -> None: """Handler for `storage_detaching` events.""" # in the case where there may be replication recovery may be possible - if self.peer_relation and len(self.peer_relation.units): + if self.state.brokers and len(self.state.brokers) > 1: self._set_status(Status.REMOVED_STORAGE) else: self._set_status(Status.REMOVED_STORAGE_NO_REPL) self._on_config_changed(event) - def _on_install(self, _) -> None: - """Handler for `install` event.""" - if self.snap.install(): - self._set_os_config() - self.kafka_config.set_environment() - self._set_status(Status.ZK_NOT_RELATED) - else: - 
self._set_status(Status.SNAP_NOT_INSTALLED) - - def _on_zookeeper_created(self, event: RelationCreatedEvent) -> None: - """Handler for `zookeeper_relation_created` events.""" - if self.unit.is_leader(): - event.relation.data[self.app].update({"chroot": "/" + self.app.name}) - - def _on_zookeeper_changed(self, event: RelationChangedEvent) -> None: - """Handler for `zookeeper_relation_created/joined/changed` events, ensuring internal users get created.""" - if not self.kafka_config.zookeeper_connected: - logger.debug("No information found from ZooKeeper relation") - self._set_status(Status.ZK_NO_DATA) - return - - # TLS must be enabled for Kafka and ZK or disabled for both - if self.tls.enabled ^ ( - self.kafka_config.zookeeper_config.get("tls", "disabled") == "enabled" - ): - event.defer() - self._set_status(Status.ZK_TLS_MISMATCH) - return - - # do not create users until certificate + keystores created - # otherwise unable to authenticate to ZK - if self.tls.enabled and not self.tls.certificate: - event.defer() - self._set_status(Status.NO_CERT) - return - - if not self.kafka_config.internal_user_credentials and self.unit.is_leader(): - # loading the minimum config needed to authenticate to zookeeper - self.kafka_config.set_zk_jaas_config() - self.kafka_config.set_server_properties() - - try: - internal_user_credentials = self._create_internal_credentials() - except (KeyError, RuntimeError, subprocess.CalledProcessError) as e: - logger.warning(str(e)) - event.defer() - return - - # only set to relation data when all set - for username, password in internal_user_credentials: - self.set_secret(scope="app", key=f"{username}-password", value=password) - - self._on_config_changed(event) - - def _on_zookeeper_broken(self, _: RelationEvent) -> None: - """Handler for `zookeeper_relation_broken` event, ensuring charm blocks.""" - self.snap.stop_snap_service() - - logger.info(f'Broker {self.unit.name.split("/")[1]} disconnected') - self._set_status(Status.ZK_NOT_RELATED) - - 
def _on_start(self, event: EventBase) -> None: - """Handler for `start` event.""" - if not self.ready_to_start: - event.defer() - return - - # required settings given zookeeper connection config has been created - self.kafka_config.set_zk_jaas_config() - self.kafka_config.set_server_properties() - self.kafka_config.set_client_properties() - - # start kafka service - self.snap.start_snap_service() - logger.info("Kafka snap started") - - # check for connection - self._on_update_status(event) - - # only log once on successful 'on-start' run - if isinstance(self.unit.status, ActiveStatus): - logger.info(f'Broker {self.unit.name.split("/")[1]} connected') - - def _on_config_changed(self, event: EventBase) -> None: - """Generic handler for most `config_changed` events across relations.""" - # only overwrite properties if service is already active - if not self.healthy or not self.upgrade.idle: - event.defer() - return - - # Load current properties set in the charm workload - properties = safe_get_file(self.kafka_config.server_properties_filepath) - if not properties: - # Event fired before charm has properly started - event.defer() - return - - if set(properties) ^ set(self.kafka_config.server_properties): - logger.info( - ( - f'Broker {self.unit.name.split("/")[1]} updating config - ' - f"OLD PROPERTIES = {set(properties) - set(self.kafka_config.server_properties)}, " - f"NEW PROPERTIES = {set(self.kafka_config.server_properties) - set(properties)}" - ) - ) - self.kafka_config.set_server_properties() - - self.kafka_config.set_environment() - - if isinstance(event, StorageEvent): # to get new storages - self.on[f"{self.restart.name}"].acquire_lock.emit( - callback_override="_disable_enable_restart" - ) - else: - logger.info("Acquiring lock from _on_config_changed...") - self.on[f"{self.restart.name}"].acquire_lock.emit() - - # update client_properties whenever possible - self.kafka_config.set_client_properties() - - # If Kafka is related to client charms, update their 
information. - if self.model.relations.get(REL_NAME, None) and self.unit.is_leader(): - self.provider.update_connection_info() - def _restart(self, event: EventBase) -> None: """Handler for `rolling_ops` restart events.""" # only attempt restart if service is already active @@ -379,13 +263,13 @@ def _restart(self, event: EventBase) -> None: event.defer() return - self.snap.restart_snap_service() + self.workload.restart() # FIXME: This logic should be improved as part of ticket DPE-3155 # For more information, please refer to https://warthogs.atlassian.net/browse/DPE-3155 - time.sleep(20.0) + time.sleep(10.0) - if self.healthy: + if self.workload.active(): logger.info(f'Broker {self.unit.name.split("/")[1]} restarted') else: logger.error(f"Broker {self.unit.name.split('/')[1]} failed to restart") @@ -397,110 +281,15 @@ def _disable_enable_restart(self, event: RunWithLock) -> None: event.defer() return - self.snap.disable_enable() - self.snap.start_snap_service() + self.workload.disable_enable() + self.workload.start() - if self.healthy: + if self.workload.active(): logger.info(f'Broker {self.unit.name.split("/")[1]} restarted') else: logger.error(f"Broker {self.unit.name.split('/')[1]} failed to restart") return - def _set_password_action(self, event: ActionEvent) -> None: - """Handler for set-password action. - - Set the password for a specific user, if no passwords are passed, generate them. - """ - if not self.unit.is_leader(): - msg = "Password rotation must be called on leader unit" - logger.error(msg) - event.fail(msg) - return - - if not self.healthy: - event.defer() - return - - username = event.params["username"] - new_password = event.params.get("password", generate_password()) - - if new_password in self.kafka_config.internal_user_credentials.values(): - msg = "Password already exists, please choose a different password." 
- logger.error(msg) - event.fail(msg) - return - - try: - self._update_internal_user(username=username, password=new_password) - except Exception as e: - logger.error(str(e)) - event.fail(f"unable to set password for {username}") - - # Store the password on application databag - self.set_secret(scope="app", key=f"{username}-password", value=new_password) - event.set_results({f"{username}-password": new_password}) - - def _get_admin_credentials_action(self, event: ActionEvent) -> None: - client_properties = safe_get_file(self.kafka_config.client_properties_filepath) - - if not client_properties: - msg = "client.properties file not found on target unit." - logger.error(msg) - event.fail(msg) - return - - admin_properties = set(client_properties) - set(self.kafka_config.tls_properties) - - event.set_results( - { - "username": ADMIN_USER, - "password": self.kafka_config.internal_user_credentials[ADMIN_USER], - "client-properties": "\n".join(admin_properties), - } - ) - - def _update_internal_user(self, username: str, password: str) -> None: - """Updates internal SCRAM usernames and passwords. - - Raises: - RuntimeError if called from non-leader unit - KeyError if attempted to update non-leader unit - subprocess.CalledProcessError if command to ZooKeeper failed - """ - if not self.unit.is_leader(): - raise RuntimeError("Cannot update internal user from non-leader unit.") - - if username not in INTERNAL_USERS: - raise KeyError( - f"Can only update internal charm users: {INTERNAL_USERS}, not {username}." - ) - - # do not start units until SCRAM users have been added to ZooKeeper for server-server auth - kafka_auth = KafkaAuth(self) - kafka_auth.add_user( - username=username, - password=password, - zk_auth=True, - ) - - def _create_internal_credentials(self) -> list[tuple[str, str]]: - """Creates internal SCRAM users during cluster start. 
- - Returns: - List of (username, password) for all internal users - - - Raises: - RuntimeError if called from non-leader unit - KeyError if attempted to update non-leader unit - subprocess.CalledProcessError if command to ZooKeeper failed - """ - credentials = [(username, generate_password()) for username in INTERNAL_USERS] - for username, password in credentials: - self._update_internal_user(username=username, password=password) - - return credentials - def _set_os_config(self) -> None: """Sets sysctl config.""" try: @@ -509,44 +298,24 @@ def _set_os_config(self) -> None: logger.error(f"Error setting values on sysctl: {e.message}") self._set_status(Status.SYSCONF_NOT_POSSIBLE) - def get_secret(self, scope: str, key: str) -> Optional[str]: - """Get TLS secret from the secret storage. + @property + def healthy(self) -> bool: + """Checks and updates various charm lifecycle states. - Args: - scope: whether this secret is for a `unit` or `app` - key: the secret key name + Is slow to fail due to retries, to be used sparingly. Returns: - String of key value. - None if non-existent key + True if service is alive and active. Otherwise False """ - if scope == "unit": - return self.unit_peer_data.get(key, None) - elif scope == "app": - return self.app_peer_data.get(key, None) - else: - raise RuntimeError("Unknown secret scope.") + self._set_status(self.state.ready_to_start) + if not isinstance(self.unit.status, ActiveStatus): + return False - def set_secret(self, scope: str, key: str, value: Optional[str]) -> None: - """Get TLS secret from the secret storage. 
+ if not self.workload.active(): + self._set_status(Status.SNAP_NOT_RUNNING) + return False - Args: - scope: whether this secret is for a `unit` or `app` - key: the secret key name - value: the value for the secret key - """ - if scope == "unit": - if not value: - self.unit_peer_data.update({key: ""}) - return - self.unit_peer_data.update({key: value}) - elif scope == "app": - if not value: - self.app_peer_data.update({key: ""}) - return - self.app_peer_data.update({key: value}) - else: - raise RuntimeError("Unknown secret scope.") + return True def _set_status(self, key: Status) -> None: """Sets charm status.""" diff --git a/src/core/cluster.py b/src/core/cluster.py new file mode 100644 index 00000000..9f3d985a --- /dev/null +++ b/src/core/cluster.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Objects representing the state of KafkaCharm.""" + +import os + +from ops import Framework, Object, Relation + +from core.models import KafkaBroker, KafkaCluster, ZooKeeper +from literals import ( + INTERNAL_USERS, + PEER, + REL_NAME, + SECURITY_PROTOCOL_PORTS, + ZK, + Status, + Substrate, +) + + +class ClusterState(Object): + """Properties and relations of the charm.""" + + def __init__(self, charm: Framework | Object, substrate: Substrate): + super().__init__(parent=charm, key="charm_state") + self.substrate: Substrate = substrate + + # --- RELATIONS --- + + @property + def peer_relation(self) -> Relation | None: + """The cluster peer relation.""" + return self.model.get_relation(PEER) + + @property + def zookeeper_relation(self) -> Relation | None: + """The ZooKeeper relation.""" + return self.model.get_relation(ZK) + + @property + def client_relations(self) -> set[Relation]: + """The relations of all client applications.""" + return set(self.model.relations[REL_NAME]) + + # --- CORE COMPONENTS --- + + @property + def broker(self) -> KafkaBroker: + """The server state of the current running 
Unit.""" + return KafkaBroker( + relation=self.peer_relation, component=self.model.unit, substrate=self.substrate + ) + + @property + def cluster(self) -> KafkaCluster: + """The cluster state of the current running App.""" + return KafkaCluster( + relation=self.peer_relation, component=self.model.app, substrate=self.substrate + ) + + @property + def brokers(self) -> set[KafkaBroker]: + """Grabs all servers in the current peer relation, including the running unit server. + + Returns: + Set of KafkaBrokers in the current peer relation, including the running unit server. + """ + if not self.peer_relation: + return set() + + servers = set() + for unit in self.peer_relation.units: + servers.add( + KafkaBroker(relation=self.peer_relation, component=unit, substrate=self.substrate) + ) + servers.add(self.broker) + + return servers + + @property + def zookeeper(self) -> ZooKeeper: + """The ZooKeeper relation state.""" + return ZooKeeper( + relation=self.zookeeper_relation, + component=self.model.app, + substrate=self.substrate, + local_app=self.model.app, + local_unit=self.model.unit, + ) + + # ---- GENERAL VALUES ---- + + @property + def super_users(self) -> str: + """Generates all users with super/admin permissions for the cluster from relations. + + Formatting allows passing to the `super.users` property. 
+ + Returns: + Semicolon delimited string of current super users + """ + super_users = set(INTERNAL_USERS) + for relation in self.client_relations: + if not relation or not relation.app: + continue + + extra_user_roles = relation.data[relation.app].get("extra-user-roles", "") + password = self.cluster.relation_data.get(f"relation-{relation.id}", None) + # if passwords are set for client admins, they're good to load + if "admin" in extra_user_roles and password is not None: + super_users.add(f"relation-{relation.id}") + + super_users_arg = sorted([f"User:{user}" for user in super_users]) + + return ";".join(super_users_arg) + + @property + def port(self) -> int: + """Return the port to be used internally.""" + return ( + SECURITY_PROTOCOL_PORTS["SASL_SSL"].client + if (self.cluster.tls_enabled and self.broker.certificate) + else SECURITY_PROTOCOL_PORTS["SASL_PLAINTEXT"].client + ) + + @property + def bootstrap_server(self) -> list[str]: + """The current Kafka uris formatted for the `bootstrap-server` command flag. + + Returns: + List of `bootstrap-server` servers + """ + if not self.peer_relation: + return [] + + return [f"{host}:{self.port}" for host in self.unit_hosts] + + @property + def log_dirs(self) -> str: + """Builds the necessary log.dirs based on mounted storage volumes. + + Returns: + String of log.dirs property value to be set + """ + return ",".join([os.fspath(storage.location) for storage in self.model.storages["data"]]) + + @property + def unit_hosts(self) -> list[str]: + """Return list of application unit hosts.""" + hosts = [broker.host for broker in self.brokers] + return hosts + + @property + def planned_units(self) -> int: + """Return the planned units for the charm.""" + return self.model.app.planned_units() + + @property + def ready_to_start(self) -> Status: + """Check for active ZooKeeper relation and adding of inter-broker auth username. + + Returns: + Status.ACTIVE if ZK is related and `sync` user has been added. Otherwise the relevant blocking Status.
+ """ + if not self.peer_relation: + return Status.NO_PEER_RELATION + + if not self.zookeeper.zookeeper_related: + return Status.ZK_NOT_RELATED + + if not self.zookeeper.zookeeper_connected: + return Status.ZK_NO_DATA + + # TLS must be enabled for Kafka and ZK or disabled for both + if self.cluster.tls_enabled ^ self.zookeeper.tls: + return Status.ZK_TLS_MISMATCH + + if not self.cluster.internal_user_credentials: + return Status.NO_BROKER_CREDS + + return Status.ACTIVE diff --git a/src/core/models.py b/src/core/models.py new file mode 100644 index 00000000..cf435f4d --- /dev/null +++ b/src/core/models.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Collection of state objects for the Kafka relations, apps and units.""" + +import logging +from typing import MutableMapping + +from charms.zookeeper.v0.client import QuorumLeaderNotFoundError, ZooKeeperManager +from kazoo.exceptions import AuthFailedError, NoNodeError +from ops.model import Application, Relation, Unit +from tenacity import retry, retry_if_not_result, stop_after_attempt, wait_fixed + +from literals import INTERNAL_USERS, Substrate + +logger = logging.getLogger(__name__) + + +class StateBase: + """Base state object.""" + + def __init__( + self, relation: Relation | None, component: Unit | Application, substrate: Substrate + ): + self.relation = relation + self.component = component + self.substrate = substrate + + @property + def relation_data(self) -> MutableMapping[str, str]: + """The raw relation data.""" + if not self.relation: + return {} + + return self.relation.data[self.component] + + def update(self, items: dict[str, str]) -> None: + """Writes to relation_data.""" + if not self.relation: + return + + self.relation_data.update(items) + + +class KafkaCluster(StateBase): + """State collection metadata for the peer relation.""" + + def __init__(self, relation: Relation | None, component: Application, substrate: Substrate): 
+ super().__init__(relation, component, substrate) + self.app = component + + @property + def internal_user_credentials(self) -> dict[str, str]: + """The charm internal usernames and passwords, e.g `sync` and `admin`. + + Returns: + Dict of usernames and passwords + """ + credentials = { + user: password + for user in INTERNAL_USERS + if (password := self.relation_data.get(f"{user}-password")) + } + + if not len(credentials) == len(INTERNAL_USERS): + return {} + + return credentials + + @property + def client_passwords(self) -> dict[str, str]: + """Usernames and passwords of related client applications.""" + return {key: value for key, value in self.relation_data.items() if "relation-" in key} + + # --- TLS --- + + @property + def tls_enabled(self) -> bool: + """Flag to check if the cluster should run with TLS. + + Returns: + True if TLS encryption should be active. Otherwise False + """ + return self.relation_data.get("tls", "disabled") == "enabled" + + @property + def mtls_enabled(self) -> bool: + """Flag to check if the cluster should run with mTLS. + + Returns: + True if TLS encryption should be active. Otherwise False + """ + return self.relation_data.get("mtls", "disabled") == "enabled" + + +class KafkaBroker(StateBase): + """State collection metadata for a charm unit.""" + + def __init__(self, relation: Relation | None, component: Unit, substrate: Substrate): + super().__init__(relation, component, substrate) + self.unit = component + + @property + def unit_id(self) -> int: + """The id of the unit from the unit name. 
+ + e.g kafka/2 --> 2 + """ + return int(self.component.name.split("/")[1]) + + @property + def host(self) -> str: + """Return the hostname of a unit.""" + host = "" + if self.substrate == "vm": + for key in ["hostname", "ip", "private-address"]: + if host := self.relation_data.get(key, ""): + break + if self.substrate == "k8s": + host = f"{self.component.name.split('/')[0]}-{self.unit_id}.{self.component.name.split('/')[0]}-endpoints" + + return host + + # --- TLS --- + + @property + def private_key(self) -> str | None: + """The unit private-key set during `certificates_joined`. + + Returns: + String of key contents + None if key not yet generated + """ + return self.relation_data.get("private-key") + + @property + def csr(self) -> str | None: + """The unit cert signing request. + + Returns: + String of csr contents + None if csr not yet generated + """ + return self.relation_data.get("csr") + + @property + def certificate(self) -> str | None: + """The signed unit certificate from the provider relation. + + Returns: + String of cert contents in PEM format + None if cert not yet generated/signed + """ + return self.relation_data.get("certificate") + + @property + def ca(self) -> str | None: + """The ca used to sign unit cert. + + Returns: + String of ca contents in PEM format + None if cert not yet generated/signed + """ + return self.relation_data.get("ca") + + @property + def keystore_password(self) -> str | None: + """The unit keystore password set during `certificates_joined`. + + Returns: + String of password + None if password not yet generated + """ + return self.relation_data.get("keystore-password") + + @property + def truststore_password(self) -> str | None: + """The unit truststore password set during `certificates_joined`. 
+ + Returns: + String of password + None if password not yet generated + """ + return self.relation_data.get("truststore-password") + + +class ZooKeeper(StateBase): + """State collection metadata for the ZooKeeper relation.""" + + def __init__( + self, + relation: Relation | None, + component: Application, + substrate: Substrate, + local_unit: Unit, + local_app: Application | None = None, + ): + super().__init__(relation, component, substrate) + self._local_app = local_app + self._local_unit = local_unit + + # APPLICATION DATA + + @property + def remote_app_data(self) -> MutableMapping[str, str]: + """Zookeeper relation data object.""" + if not self.relation or not self.relation.app: + return {} + + return self.relation.data[self.relation.app] + + @property + def app_data(self) -> MutableMapping[str, str]: + """Zookeeper relation data object.""" + if not self.relation or not self._local_app: + return {} + + return self.relation.data[self._local_app] + + # --- RELATION PROPERTIES --- + + @property + def zookeeper_related(self) -> bool: + """Checks if there is a relation with ZooKeeper. + + Returns: + True if there is a ZooKeeper relation.
Otherwise False + """ + return bool(self.relation) + + @property + def username(self) -> str: + """Username to connect to ZooKeeper.""" + return self.remote_app_data.get("username", "") + + @property + def password(self) -> str: + """Password of the ZooKeeper user.""" + return self.remote_app_data.get("password", "") + + @property + def endpoints(self) -> str: + """IP/host where ZooKeeper is located.""" + return self.remote_app_data.get("endpoints", "") + + @property + def chroot(self) -> str: + """Path allocated for Kafka on ZooKeeper.""" + return self.remote_app_data.get("chroot", "") + + @property + def uris(self) -> str: + """Comma separated connection string, containing endpoints + chroot.""" + return self.remote_app_data.get("uris", "") + + @property + def tls(self) -> bool: + """Check if TLS is enabled on ZooKeeper.""" + return bool(self.remote_app_data.get("tls", "disabled") == "enabled") + + @property + def connect(self) -> str: + """Full connection string of sorted uris.""" + sorted_uris = sorted(self.uris.replace(self.chroot, "").split(",")) + sorted_uris[-1] = sorted_uris[-1] + self.chroot + return ",".join(sorted_uris) + + @property + def zookeeper_connected(self) -> bool: + """Checks if there is an active ZooKeeper relation with all necessary data. + + Returns: + True if ZooKeeper is currently related with sufficient relation data + for a broker to connect with. 
Otherwise False + """ + if not all([self.username, self.password, self.endpoints, self.chroot, self.uris]): + return False + + return True + + @property + def zookeeper_version(self) -> str: + """Get running zookeeper version.""" + hosts = self.endpoints.split(",") + zk = ZooKeeperManager(hosts=hosts, username=self.username, password=self.password) + + return zk.get_version() + + @retry( + # retry to give ZK time to update its broker zNodes before failing + wait=wait_fixed(6), + stop=stop_after_attempt(10), + retry_error_callback=(lambda state: state.outcome.result()), # type: ignore + retry=retry_if_not_result(lambda result: True if result else False), + ) + def broker_active(self) -> bool: + """Checks if broker id is recognised as active by ZooKeeper.""" + broker_id = self._local_unit.name.split("/")[1] + brokers = self.get_active_brokers() + return f"{self.chroot}/brokers/ids/{broker_id}" in brokers + + def get_active_brokers(self) -> set[str]: + """Gets all brokers currently connected to ZooKeeper.""" + hosts = self.endpoints.split(",") + zk = ZooKeeperManager(hosts=hosts, username=self.username, password=self.password) + path = f"{self.chroot}/brokers/ids/" + + try: + brokers = zk.leader_znodes(path=path) + # auth might not be ready with ZK after relation yet + except (NoNodeError, AuthFailedError, QuorumLeaderNotFoundError) as e: + logger.debug(str(e)) + return set() + + return brokers diff --git a/src/structured_config.py b/src/core/structured_config.py similarity index 89% rename from src/structured_config.py rename to src/core/structured_config.py index 38e8e104..8a5efcab 100644 --- a/src/structured_config.py +++ b/src/core/structured_config.py @@ -6,7 +6,6 @@ import logging import re from enum import Enum -from typing import Optional from charms.data_platform_libs.v0.data_models import BaseConfigModel from pydantic import validator @@ -53,7 +52,7 @@ class CharmConfig(BaseConfigModel): compression_type: str log_flush_interval_messages: int # int # long - 
log_flush_interval_ms: Optional[int] # long + log_flush_interval_ms: int | None # long log_flush_offset_checkpoint_interval_ms: int log_retention_bytes: int # long log_retention_ms: int # long @@ -66,12 +65,12 @@ class CharmConfig(BaseConfigModel): log_cleaner_min_compaction_lag_ms: int # long log_cleanup_policy: str log_message_timestamp_type: str - ssl_cipher_suites: Optional[str] + ssl_cipher_suites: str | None ssl_principal_mapping_rules: str replication_quota_window_num: int - zookeeper_ssl_cipher_suites: Optional[str] + zookeeper_ssl_cipher_suites: str | None profile: str - certificate_extra_sans: Optional[str] + certificate_extra_sans: str | None log_level: str @validator("*", pre=True) @@ -84,7 +83,7 @@ def blank_string(cls, value): @validator("log_message_timestamp_type") @classmethod - def log_message_timestamp_type_validator(cls, value: str) -> Optional[str]: + def log_message_timestamp_type_validator(cls, value: str) -> str | None: """Check validity of `log_message_timestamp_type` field.""" try: _log_message_timestap_type = LogMessageTimestampType(value) @@ -96,7 +95,7 @@ def log_message_timestamp_type_validator(cls, value: str) -> Optional[str]: @validator("log_cleanup_policy") @classmethod - def log_cleanup_policy_validator(cls, value: str) -> Optional[str]: + def log_cleanup_policy_validator(cls, value: str) -> str | None: """Check validity of `log_cleanup_policy` field.""" try: _log_cleanup_policy = LogCleanupPolicy(value) @@ -108,7 +107,7 @@ def log_cleanup_policy_validator(cls, value: str) -> Optional[str]: @validator("log_cleaner_min_compaction_lag_ms") @classmethod - def log_cleaner_min_compaction_lag_ms_validator(cls, value: str) -> Optional[int]: + def log_cleaner_min_compaction_lag_ms_validator(cls, value: str) -> int | None: """Check validity of `log_cleaner_min_compaction_lag_ms` field.""" int_value = int(value) if int_value >= 0 and int_value <= 1000 * 60 * 60 * 24 * 7: @@ -117,7 +116,7 @@ def 
log_cleaner_min_compaction_lag_ms_validator(cls, value: str) -> Optional[int @validator("log_cleaner_delete_retention_ms") @classmethod - def log_cleaner_delete_retention_ms_validator(cls, value: str) -> Optional[int]: + def log_cleaner_delete_retention_ms_validator(cls, value: str) -> int | None: """Check validity of `log_cleaner_delete_retention_ms` field.""" int_value = int(value) if int_value > 0 and int_value <= 1000 * 60 * 60 * 24 * 90: @@ -126,7 +125,7 @@ def log_cleaner_delete_retention_ms_validator(cls, value: str) -> Optional[int]: @validator("ssl_principal_mapping_rules") @classmethod - def ssl_principal_mapping_rules_validator(cls, value: str) -> Optional[str]: + def ssl_principal_mapping_rules_validator(cls, value: str) -> str | None: """Check that the list is formed by valid regex values.""" # get all regex up until replacement position "/" # TODO: check that there is a replacement as well, not: RULE:regex/ @@ -141,7 +140,7 @@ def ssl_principal_mapping_rules_validator(cls, value: str) -> Optional[str]: @validator("transaction_state_log_num_partitions", "offsets_topic_num_partitions") @classmethod - def between_zero_and_10k(cls, value: int) -> Optional[int]: + def between_zero_and_10k(cls, value: int) -> int | None: """Check that the integer value is between zero and 10000.""" if value >= 0 and value <= 10000: return value @@ -149,7 +148,7 @@ def between_zero_and_10k(cls, value: int) -> Optional[int]: @validator("log_retention_bytes", "log_retention_ms") @classmethod - def greater_than_minus_one(cls, value: str) -> Optional[int]: + def greater_than_minus_one(cls, value: str) -> int | None: """Check value greater than -1.""" int_value = int(value) if int_value < -1: @@ -158,7 +157,7 @@ def greater_than_minus_one(cls, value: str) -> Optional[int]: @validator("log_flush_interval_messages", "log_flush_interval_ms") @classmethod - def greater_than_one(cls, value: str) -> Optional[int]: + def greater_than_one(cls, value: str) -> int | None: """Check value 
greater than one.""" int_value = int(value) if int_value < 1: @@ -167,7 +166,7 @@ def greater_than_one(cls, value: str) -> Optional[int]: @validator("replication_quota_window_num", "log_segment_bytes", "message_max_bytes") @classmethod - def greater_than_zero(cls, value: int) -> Optional[int]: + def greater_than_zero(cls, value: int) -> int | None: """Check value greater than zero.""" if value < 0: raise ValueError("Value below -1. Accepted value are greater or equal than -1.") @@ -175,7 +174,7 @@ def greater_than_zero(cls, value: int) -> Optional[int]: @validator("compression_type") @classmethod - def value_compression_type(cls, value: str) -> Optional[str]: + def value_compression_type(cls, value: str) -> str | None: """Check validity of `compression_type` field.""" try: _compression_type = CompressionType(value) @@ -194,7 +193,7 @@ def value_compression_type(cls, value: str) -> Optional[str]: "replication_quota_window_num", ) @classmethod - def integer_value(cls, value: int) -> Optional[int]: + def integer_value(cls, value: int) -> int | None: """Check that the value is an integer (-2147483648,2147483647).""" if value >= -2147483648 and value <= 2147483647: return value @@ -209,7 +208,7 @@ def integer_value(cls, value: int) -> Optional[int]: "log_cleaner_min_compaction_lag_ms", ) @classmethod - def long_value(cls, value: str) -> Optional[int]: + def long_value(cls, value: str) -> int | None: """Check that the value is a long (-2^63 , 2^63 -1).""" int_value = int(value) if int_value >= -9223372036854775807 and int_value <= 9223372036854775808: @@ -218,7 +217,7 @@ def long_value(cls, value: str) -> Optional[int]: @validator("profile") @classmethod - def profile_values(cls, value: str) -> Optional[str]: + def profile_values(cls, value: str) -> str | None: """Check profile config option is one of `testing`, `staging` or `production`.""" if value not in ["testing", "staging", "production"]: raise ValueError("Value not one of 'testing', 'staging' or 'production'") @@ 
-227,7 +226,7 @@ def profile_values(cls, value: str) -> Optional[str]: @validator("log_level") @classmethod - def log_level_values(cls, value: str) -> Optional[str]: + def log_level_values(cls, value: str) -> str | None: """Check validity of `log_level` field.""" try: _log_level = LogLevel(value) diff --git a/src/core/workload.py b/src/core/workload.py new file mode 100644 index 00000000..0608d6df --- /dev/null +++ b/src/core/workload.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Supporting objects for Kafka charm state.""" + +import secrets +import string +from abc import ABC, abstractmethod + +from literals import PATHS + + +class KafkaPaths: + """Object to store common paths for Kafka.""" + + def __init__(self): + self.conf_path = PATHS["CONF"] + self.data_path = PATHS["DATA"] + self.binaries_path = PATHS["BIN"] + self.logs_path = PATHS["LOGS"] + + @property + def server_properties(self): + """The main server.properties filepath. + + Contains all the main configuration for the service. + """ + return f"{self.conf_path}/server.properties" + + @property + def client_properties(self): + """The main client.properties filepath. + + Contains all the client configuration for the service. + """ + return f"{self.conf_path}/client.properties" + + @property + def zk_jaas(self): + """The zookeeper-jaas.cfg filepath. + + Contains internal+external user credentials used in SASL auth. + """ + return f"{self.conf_path}/zookeeper-jaas.cfg" + + @property + def keystore(self): + """The Java Keystore containing service private-key and signed certificates.""" + return f"{self.conf_path}/keystore.p12" + + @property + def truststore(self): + """The Java Truststore containing trusted CAs + certificates.""" + return f"{self.conf_path}/truststore.jks" + + @property + def log4j_properties(self): + """The Log4j properties filepath. + + Contains the Log4j configuration options of the service. 
+ """ + return f"{self.conf_path}/log4j.properties" + + @property + def jmx_prometheus_javaagent(self): + """The JMX exporter JAR filepath. + + Used for scraping and exposing mBeans of a JMX target. + """ + return f"{self.binaries_path}/libs/jmx_prometheus_javaagent.jar" + + @property + def jmx_prometheus_config(self): + """The configuration for the JMX exporter.""" + return f"{self.conf_path}/jmx_prometheus.yaml" + + +class WorkloadBase(ABC): + """Base interface for common workload operations.""" + + paths = KafkaPaths() + + @abstractmethod + def start(self) -> None: + """Starts the workload service.""" + ... + + @abstractmethod + def stop(self) -> None: + """Stops the workload service.""" + ... + + @abstractmethod + def restart(self) -> None: + """Restarts the workload service.""" + ... + + @abstractmethod + def read(self, path: str) -> list[str]: + """Reads a file from the workload. + + Args: + path: the full filepath to read from + + Returns: + List of string lines from the specified path + """ + ... + + @abstractmethod + def write(self, content: str, path: str, mode: str = "w") -> None: + """Writes content to a workload file. + + Args: + content: string of content to write + path: the full filepath to write to + mode: the write mode. Usually "w" for write, or "a" for append. Default "w" + """ + ... + + @abstractmethod + def exec( + self, command: str, env: dict[str, str] | None = None, working_dir: str | None = None + ) -> str: + """Runs a command on the workload substrate.""" + ... + + @abstractmethod + def active(self) -> bool: + """Checks that the workload is active.""" + ... + + @abstractmethod + def run_bin_command(self, bin_keyword: str, bin_args: list[str], opts: list[str] = []) -> str: + """Runs kafka bin command with desired args. + + Args: + bin_keyword: the kafka shell script to run + e.g `configs`, `topics` etc + bin_args: the shell command args + opts: any additional opts args strings + + Returns: + String of kafka bin command output + """ + ... 
+ + @staticmethod + def generate_password() -> str: + """Creates randomized string for use as app passwords. + + Returns: + String of 32 randomized letter+digit characters + """ + return "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(32)]) diff --git a/src/events/password_actions.py b/src/events/password_actions.py new file mode 100644 index 00000000..a3354da2 --- /dev/null +++ b/src/events/password_actions.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Event handlers for password-related Juju Actions.""" +import logging +from typing import TYPE_CHECKING + +from ops.charm import ActionEvent +from ops.framework import Object + +from literals import ADMIN_USER, INTERNAL_USERS + +if TYPE_CHECKING: + from charm import KafkaCharm + +logger = logging.getLogger(__name__) + + +class PasswordActionEvents(Object): + """Event handlers for password-related Juju Actions.""" + + def __init__(self, charm): + super().__init__(charm, "password_events") + self.charm: "KafkaCharm" = charm + + self.framework.observe( + getattr(self.charm.on, "set_password_action"), self._set_password_action + ) + self.framework.observe( + getattr(self.charm.on, "get_admin_credentials_action"), + self._get_admin_credentials_action, + ) + + def _set_password_action(self, event: ActionEvent) -> None: + """Handler for set-password action. + + Set the password for a specific user, if no passwords are passed, generate them. + """ + if not self.model.unit.is_leader(): + msg = "Password rotation must be called on leader unit" + logger.error(msg) + event.fail(msg) + return + + if not self.charm.healthy: + msg = "Unit is not healthy" + logger.error(msg) + event.fail(msg) + return + + username = event.params["username"] + if username not in INTERNAL_USERS: + msg = f"Can only update internal charm users: {INTERNAL_USERS}, not {username}." 
+ logger.error(msg) + event.fail(msg) + return + + new_password = event.params.get("password", self.charm.workload.generate_password()) + + if new_password in self.charm.state.cluster.internal_user_credentials.values(): + msg = "Password already exists, please choose a different password." + logger.error(msg) + event.fail(msg) + return + + try: + self.charm.auth_manager.add_user( + username=username, password=new_password, zk_auth=True + ) + except Exception as e: + logger.error(str(e)) + event.fail(f"unable to set password for {username}") + return + + # Store the password on application databag + self.charm.state.cluster.relation_data.update({f"{username}-password": new_password}) + event.set_results({f"{username}-password": new_password}) + + def _get_admin_credentials_action(self, event: ActionEvent) -> None: + client_properties = self.charm.workload.read(self.charm.workload.paths.client_properties) + + if not client_properties: + msg = "client.properties file not found on target unit." 
+ logger.error(msg) + event.fail(msg) + return + + admin_properties = set(client_properties) - set(self.charm.config_manager.tls_properties) + + event.set_results( + { + "username": ADMIN_USER, + "password": self.charm.state.cluster.internal_user_credentials[ADMIN_USER], + "client-properties": "\n".join(admin_properties), + } + ) diff --git a/src/provider.py b/src/events/provider.py similarity index 74% rename from src/provider.py rename to src/events/provider.py index 7415e149..3c57519d 100644 --- a/src/provider.py +++ b/src/events/provider.py @@ -11,11 +11,9 @@ from charms.data_platform_libs.v0.data_interfaces import KafkaProvides, TopicRequestedEvent from ops.charm import RelationBrokenEvent, RelationCreatedEvent from ops.framework import Object +from ops.pebble import ExecError -from auth import KafkaAuth -from config import KafkaConfig from literals import REL_NAME -from utils import generate_password if TYPE_CHECKING: from charm import KafkaCharm @@ -29,9 +27,6 @@ class KafkaProvider(Object): def __init__(self, charm) -> None: super().__init__(charm, "kafka_client") self.charm: "KafkaCharm" = charm - self.kafka_config = KafkaConfig(self.charm) - self.kafka_auth = KafkaAuth(charm) - self.kafka_provider = KafkaProvides(self.charm, REL_NAME) self.framework.observe(self.charm.on[REL_NAME].relation_created, self._on_relation_created) @@ -50,17 +45,20 @@ def on_topic_requested(self, event: TopicRequestedEvent): # on all unit update the server properties to enable client listener if needed self.charm._on_config_changed(event) - if not self.charm.unit.is_leader() or not self.charm.peer_relation: + if not self.charm.unit.is_leader() or not self.charm.state.peer_relation: return extra_user_roles = event.extra_user_roles or "" topic = event.topic or "" relation = event.relation username = f"relation-{relation.id}" - password = self.charm.app_peer_data.get(username) or generate_password() - bootstrap_server = self.charm.kafka_config.bootstrap_server - zookeeper_uris = 
self.charm.kafka_config.zookeeper_config.get("connect", "") - tls = "enabled" if self.charm.tls.enabled else "disabled" + password = ( + self.charm.state.cluster.client_passwords.get(username) + or self.charm.workload.generate_password() + ) + bootstrap_server = self.charm.state.bootstrap_server + zookeeper_uris = self.charm.state.zookeeper.connect + tls = "enabled" if self.charm.state.cluster.tls_enabled else "disabled" consumer_group_prefix = ( event.consumer_group_prefix or f"{username}-" if "consumer" in extra_user_roles else "" @@ -68,19 +66,19 @@ def on_topic_requested(self, event: TopicRequestedEvent): # catching error here in case listeners not established for bootstrap-server auth try: - self.kafka_auth.add_user( + self.charm.auth_manager.add_user( username=username, password=password, ) - except subprocess.CalledProcessError: - logger.warning("unable to create internal user just yet") + except (subprocess.CalledProcessError, ExecError): + logger.warning(f"unable to create user {username} just yet") event.defer() return # non-leader units need cluster_config_changed event to update their super.users - self.charm.app_peer_data.update({username: password}) + self.charm.state.cluster.update({username: password}) - self.kafka_auth.update_user_acls( + self.charm.auth_manager.update_user_acls( username=username, topic=topic, extra_user_roles=extra_user_roles, @@ -88,7 +86,7 @@ def on_topic_requested(self, event: TopicRequestedEvent): ) # non-leader units need cluster_config_changed event to update their super.users - self.charm.app_peer_data.update({"super-users": self.kafka_config.super_users}) + self.charm.state.cluster.update({"super-users": self.charm.state.super_users}) self.kafka_provider.set_bootstrap_server(relation.id, ",".join(bootstrap_server)) self.kafka_provider.set_consumer_group_prefix(relation.id, consumer_group_prefix) @@ -113,7 +111,7 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: if self.charm.app.planned_units == 0: 
return - if not self.charm.unit.is_leader() or not self.charm.peer_relation: + if not self.charm.unit.is_leader() or not self.charm.state.peer_relation: return if not self.charm.healthy: @@ -122,23 +120,23 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: if event.relation.app != self.charm.app or not self.charm.app.planned_units() == 0: username = f"relation-{event.relation.id}" - self.kafka_auth.remove_all_user_acls(username=username) - self.kafka_auth.delete_user(username=username) + self.charm.auth_manager.remove_all_user_acls(username=username) + self.charm.auth_manager.delete_user(username=username) # non-leader units need cluster_config_changed event to update their super.users - # update on the peer relation data will trigger an update of server properties on all unit - self.charm.app_peer_data.update({username: ""}) + # update on the peer relation data will trigger an update of server properties on all units + self.charm.state.cluster.update({username: ""}) def update_connection_info(self): """Updates all relations with current endpoints, bootstrap-server and tls data. If information didn't change, no events will trigger. 
""" - bootstrap_server = self.charm.kafka_config.bootstrap_server - zookeeper_uris = self.charm.kafka_config.zookeeper_config.get("connect", "") - tls = "enabled" if self.charm.tls.enabled else "disabled" + bootstrap_server = self.charm.state.bootstrap_server + zookeeper_uris = self.charm.state.zookeeper.connect + tls = "enabled" if self.charm.state.cluster.tls_enabled else "disabled" for relation in self.charm.model.relations[REL_NAME]: - if self.charm.app_peer_data.get(f"relation-{relation.id}", None): + if f"relation-{relation.id}" in self.charm.state.cluster.client_passwords: self.kafka_provider.set_bootstrap_server( relation_id=relation.id, bootstrap_server=",".join(bootstrap_server) ) diff --git a/src/events/tls.py b/src/events/tls.py new file mode 100644 index 00000000..a7d7b5c6 --- /dev/null +++ b/src/events/tls.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Manager for handling Kafka TLS configuration.""" + +import base64 +import json +import logging +import os +import re +import socket +from typing import TYPE_CHECKING + +from charms.tls_certificates_interface.v1.tls_certificates import ( + CertificateAvailableEvent, + TLSCertificatesRequiresV1, + _load_relation_data, + generate_csr, + generate_private_key, +) +from ops.charm import ( + ActionEvent, + RelationBrokenEvent, + RelationChangedEvent, + RelationJoinedEvent, +) +from ops.framework import Object +from ops.model import ActiveStatus, BlockedStatus + +from literals import TLS_RELATION, TRUSTED_CA_RELATION, TRUSTED_CERTIFICATE_RELATION + +if TYPE_CHECKING: + from charm import KafkaCharm + +logger = logging.getLogger(__name__) + + +class TLSHandler(Object): + """Handler for managing the client and unit TLS keys/certs.""" + + def __init__(self, charm): + super().__init__(charm, "tls") + self.charm: "KafkaCharm" = charm + + self.certificates = TLSCertificatesRequiresV1(self.charm, TLS_RELATION) + + # Own certificates 
handlers + self.framework.observe( + self.charm.on[TLS_RELATION].relation_created, self._tls_relation_created + ) + self.framework.observe( + self.charm.on[TLS_RELATION].relation_joined, self._tls_relation_joined + ) + self.framework.observe( + self.charm.on[TLS_RELATION].relation_broken, self._tls_relation_broken + ) + self.framework.observe( + getattr(self.certificates.on, "certificate_available"), self._on_certificate_available + ) + self.framework.observe( + getattr(self.certificates.on, "certificate_expiring"), self._on_certificate_expiring + ) + self.framework.observe( + getattr(self.charm.on, "set_tls_private_key_action"), self._set_tls_private_key + ) + + # External certificates handlers (for mTLS) + for relation in [TRUSTED_CERTIFICATE_RELATION, TRUSTED_CA_RELATION]: + self.framework.observe( + self.charm.on[relation].relation_created, + self._trusted_relation_created, + ) + self.framework.observe( + self.charm.on[relation].relation_joined, + self._trusted_relation_joined, + ) + self.framework.observe( + self.charm.on[relation].relation_changed, + self._trusted_relation_changed, + ) + self.framework.observe( + self.charm.on[relation].relation_broken, + self._trusted_relation_broken, + ) + + def _tls_relation_created(self, _) -> None: + """Handler for `certificates_relation_created` event.""" + if not self.charm.unit.is_leader() or not self.charm.state.peer_relation: + return + + self.charm.state.cluster.update({"tls": "enabled"}) + + def _tls_relation_joined(self, _) -> None: + """Handler for `certificates_relation_joined` event.""" + # generate unit private key if not already created by action + if not self.charm.state.broker.private_key: + self.charm.state.broker.update({"private-key": generate_private_key().decode("utf-8")}) + + # generate unit private key if not already created by action + if not self.charm.state.broker.keystore_password: + self.charm.state.broker.update( + {"keystore-password": self.charm.workload.generate_password()} + ) + if not 
self.charm.state.broker.truststore_password: + self.charm.state.broker.update( + {"truststore-password": self.charm.workload.generate_password()} + ) + + self._request_certificate() + + def _tls_relation_broken(self, _) -> None: + """Handler for `certificates_relation_broken` event.""" + self.charm.state.broker.update({"csr": ""}) + self.charm.state.broker.update({"certificate": ""}) + self.charm.state.broker.update({"ca": ""}) + + # remove all existing keystores from the unit so we don't preserve certs + self.charm.tls_manager.remove_stores() + + if not self.charm.unit.is_leader(): + return + + self.charm.state.cluster.update({"tls": ""}) + + def _trusted_relation_created(self, _) -> None: + """Handle relation created event to trusted tls charm.""" + if not self.charm.unit.is_leader(): + return + + if not self.charm.state.cluster.tls_enabled: + msg = "Own certificates are not set. Please relate using 'certificates' relation first" + logger.error(msg) + self.charm.app.status = BlockedStatus(msg) + return + + # Create a "mtls" flag so a new listener (CLIENT_SSL) is created + self.charm.state.cluster.update({"mtls": "enabled"}) + self.charm.app.status = ActiveStatus() + + def _trusted_relation_joined(self, event: RelationJoinedEvent) -> None: + """Generate a CSR so the tls-certificates operator works as expected.""" + # Once the certificates have been added, TLS setup has finished + if not self.charm.state.broker.certificate: + logger.debug("Missing TLS relation, deferring") + event.defer() + return + + alias = self.charm.tls_manager.generate_alias( + app_name=event.app.name, # pyright: ignore[reportOptionalMemberAccess] + relation_id=event.relation.id, + ) + subject = os.uname()[1] if self.charm.substrate == "k8s" else self.charm.state.broker.host + csr = ( + generate_csr( + add_unique_id_to_subject_name=bool(alias), + private_key=self.charm.state.broker.private_key.encode( # pyright: ignore[reportOptionalMemberAccess] + "utf-8" + ), + subject=subject, + 
sans_ip=self._sans["sans_ip"], + sans_dns=self._sans["sans_dns"], + ) + .decode() + .strip() + ) + + csr_dict = [{"certificate_signing_request": csr}] + event.relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps(csr_dict) + + def _trusted_relation_changed(self, event: RelationChangedEvent) -> None: + """Overrides the requirer logic of TLSInterface.""" + # Once the certificates have been added, TLS setup has finished + if not self.charm.state.broker.certificate: + logger.debug("Missing TLS relation, deferring") + event.defer() + return + + relation_data = _load_relation_data(dict(event.relation.data[event.relation.app])) # type: ignore[reportOptionalMemberAccess] + provider_certificates = relation_data.get("certificates", []) + + if not provider_certificates: + logger.warning("No certificates on provider side") + event.defer() + return + + alias = self.charm.tls_manager.generate_alias( + event.relation.app.name, # pyright: ignore[reportOptionalMemberAccess] + event.relation.id, + ) + # NOTE: Relation should only be used with one set of certificates, + # hence using just the first item on the list. 
+ content = ( + provider_certificates[0]["certificate"] + if event.relation.name == TRUSTED_CERTIFICATE_RELATION + else provider_certificates[0]["ca"] + ) + filename = f"{alias}.pem" + self.charm.workload.write( + content=content, path=f"{self.charm.workload.paths.conf_path}/{filename}" + ) + self.charm.tls_manager.import_cert(alias=f"{alias}", filename=filename) + + # ensuring new config gets applied + self.charm.on[f"{self.charm.restart.name}"].acquire_lock.emit() + + def _trusted_relation_broken(self, event: RelationBrokenEvent) -> None: + """Handle relation broken for a trusted certificate/ca relation.""" + # Once the certificates have been added, TLS setup has finished + if not self.charm.state.broker.certificate: + logger.debug("Missing TLS relation, deferring") + event.defer() + return + + # All units will need to remove the cert from their truststore + alias = self.charm.tls_manager.generate_alias( + app_name=event.relation.app.name, # pyright: ignore[reportOptionalMemberAccess] + relation_id=event.relation.id, + ) + self.charm.tls_manager.remove_cert(alias=alias) + + # The leader will also handle removing the "mtls" flag if needed + if not self.charm.unit.is_leader(): + return + + # Get all relations, and remove the one being broken + all_relations = ( + self.model.relations[TRUSTED_CA_RELATION] + + self.model.relations[TRUSTED_CERTIFICATE_RELATION] + ) + all_relations.remove(event.relation) + logger.debug(f"Remaining relations: {all_relations}") + + # No relations means that there are no certificates left in the truststore + if not all_relations: + self.charm.state.cluster.update({"mtls": ""}) + + def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: + """Handler for `certificates_available` event after provider updates signed certs.""" + if not self.charm.state.peer_relation: + logger.warning("No peer relation on certificate available") + event.defer() + return + + # avoid setting tls files and restarting + if
event.certificate_signing_request != self.charm.state.broker.csr: + logger.error("Can't use certificate, found unknown CSR") + return + + self.charm.state.broker.update({"certificate": event.certificate}) + self.charm.state.broker.update({"ca": event.ca}) + + self.charm.tls_manager.set_server_key() + self.charm.tls_manager.set_ca() + self.charm.tls_manager.set_certificate() + self.charm.tls_manager.set_truststore() + self.charm.tls_manager.set_keystore() + + def _on_certificate_expiring(self, _) -> None: + """Handler for `certificate_expiring` event.""" + if ( + not self.charm.state.broker.private_key + or not self.charm.state.broker.csr + or not self.charm.state.peer_relation + ): + logger.error("Missing unit private key and/or old csr") + return + new_csr = generate_csr( + private_key=self.charm.state.broker.private_key.encode("utf-8"), + subject=self.charm.state.broker.relation_data.get("private-address", ""), + sans_ip=self._sans["sans_ip"], + sans_dns=self._sans["sans_dns"], + ) + + self.certificates.request_certificate_renewal( + old_certificate_signing_request=self.charm.state.broker.csr.encode("utf-8"), + new_certificate_signing_request=new_csr, + ) + + self.charm.state.broker.update({"csr": new_csr.decode("utf-8").strip()}) + + def _set_tls_private_key(self, event: ActionEvent) -> None: + """Handler for `set_tls_private_key` action.""" + key = event.params.get("internal-key") or generate_private_key().decode("utf-8") + private_key = ( + key + if re.match(r"(-+(BEGIN|END) [A-Z ]+-+)", key) + else base64.b64decode(key).decode("utf-8") + ) + + self.charm.state.broker.update({"private-key": private_key}) + self._on_certificate_expiring(event) + + def _request_certificate(self): + """Generates and submits CSR to provider.""" + if not self.charm.state.broker.private_key or not self.charm.state.peer_relation: + logger.error("Can't request certificate, missing private key") + return + + csr = generate_csr( + 
private_key=self.charm.state.broker.private_key.encode("utf-8"), + subject=self.charm.state.broker.relation_data.get("private-address", ""), + sans_ip=self._sans["sans_ip"], + sans_dns=self._sans["sans_dns"], + ) + self.charm.state.broker.update({"csr": csr.decode("utf-8").strip()}) + + self.certificates.request_certificate_creation(certificate_signing_request=csr) + + @property + def _sans(self) -> dict[str, list[str] | None]: + """Builds a SAN dict of DNS names and IPs for the unit.""" + if self.charm.substrate == "vm": + return { + "sans_ip": [self.charm.state.broker.host], + "sans_dns": [self.model.unit.name, socket.getfqdn()] + self._extra_sans, + } + else: + bind_address = "" + if self.charm.state.peer_relation: + if binding := self.charm.model.get_binding(self.charm.state.peer_relation): + bind_address = binding.network.bind_address + return { + "sans_ip": [str(bind_address)], + "sans_dns": [ + self.charm.state.broker.host.split(".")[0], + self.charm.state.broker.host, + socket.getfqdn(), + ] + + self._extra_sans, + } + + @property + def _extra_sans(self) -> list[str]: + """Parse the certificate_extra_sans config option.""" + extra_sans = self.charm.config.certificate_extra_sans or "" + parsed_sans = [] + + if extra_sans == "": + return parsed_sans + + for sans in extra_sans.split(","): + parsed_sans.append(sans.replace("{unit}", self.charm.unit.name.split("/")[1])) + + return parsed_sans diff --git a/src/upgrade.py b/src/events/upgrade.py similarity index 89% rename from src/upgrade.py rename to src/events/upgrade.py index 30276f07..d3fbd6b9 100644 --- a/src/upgrade.py +++ b/src/events/upgrade.py @@ -17,8 +17,6 @@ from pydantic import BaseModel from typing_extensions import override -from utils import get_zookeeper_version - if TYPE_CHECKING: from charm import KafkaCharm @@ -52,7 +50,7 @@ def current_version(self) -> str: @property def zookeeper_current_version(self) -> str: """Get current Zookeeper version.""" - return 
get_zookeeper_version(zookeeper_config=self.charm.kafka_config.zookeeper_config) + return self.charm.state.zookeeper.zookeeper_version def post_upgrade_check(self) -> None: """Runs necessary checks validating the unit is in a healthy state after upgrade.""" @@ -67,7 +65,7 @@ def pre_upgrade_check(self) -> None: @override def build_upgrade_stack(self) -> list[int]: upgrade_stack = [] - units = set([self.charm.unit] + list(self.charm.peer_relation.units)) # type: ignore[reportOptionalMemberAccess] + units = set([self.charm.unit] + list(self.charm.state.peer_relation.units)) # type: ignore[reportOptionalMemberAccess] for unit in units: upgrade_stack.append(int(unit.name.split("/")[-1])) @@ -92,17 +90,17 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: self.set_unit_failed() return - self.charm.snap.stop_snap_service() + self.charm.workload.stop() - if not self.charm.snap.install(): + if not self.charm.workload.install(): logger.error("Unable to install Snap") self.set_unit_failed() return - self.charm.kafka_config.set_environment() + self.charm.config_manager.set_environment() logger.info(f"{self.charm.unit.name} upgrading service...") - self.charm.snap.restart_snap_service() + self.charm.workload.restart() # Allow for some time to settle down # FIXME: This logic should be improved as part of ticket DPE-3155 diff --git a/src/events/zookeeper.py b/src/events/zookeeper.py new file mode 100644 index 00000000..5055fe8d --- /dev/null +++ b/src/events/zookeeper.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Supporting objects for Kafka-Zookeeper relation.""" + +import logging +import subprocess +from typing import TYPE_CHECKING + +from ops import Object, RelationChangedEvent, RelationEvent +from ops.pebble import ExecError + +from literals import INTERNAL_USERS, ZK, Status + +if TYPE_CHECKING: + from charm import KafkaCharm + +logger = logging.getLogger(__name__) + + +class ZooKeeperHandler(Object): + """Implements the provider-side logic for client applications relating to Kafka.""" + + def __init__(self, charm) -> None: + super().__init__(charm, "zookeeper_client") + self.charm: "KafkaCharm" = charm + + self.framework.observe(self.charm.on[ZK].relation_created, self._on_zookeeper_created) + self.framework.observe(self.charm.on[ZK].relation_joined, self._on_zookeeper_changed) + self.framework.observe(self.charm.on[ZK].relation_changed, self._on_zookeeper_changed) + self.framework.observe(self.charm.on[ZK].relation_broken, self._on_zookeeper_broken) + + def _on_zookeeper_created(self, _) -> None: + """Handler for `zookeeper_relation_created` events.""" + if self.model.unit.is_leader(): + self.charm.state.zookeeper.update({"chroot": "/" + self.model.app.name}) + + def _on_zookeeper_changed(self, event: RelationChangedEvent) -> None: + """Handler for `zookeeper_relation_created/joined/changed` events, ensuring internal users get created.""" + if not self.charm.state.zookeeper.zookeeper_connected: + logger.debug("No information found from ZooKeeper relation") + self.charm._set_status(Status.ZK_NO_DATA) + return + + # TLS must be enabled for Kafka and ZK or disabled for both + if self.charm.state.cluster.tls_enabled ^ self.charm.state.zookeeper.tls: + event.defer() + self.charm._set_status(Status.ZK_TLS_MISMATCH) + return + + # do not create users until certificate + keystores created + # otherwise unable to authenticate to ZK + if self.charm.state.cluster.tls_enabled and not self.charm.state.broker.certificate: + self.charm._set_status(Status.NO_CERT) + 
event.defer() + return + + if not self.charm.state.cluster.internal_user_credentials and self.model.unit.is_leader(): + # loading the minimum config needed to authenticate to zookeeper + self.charm.config_manager.set_zk_jaas_config() + self.charm.config_manager.set_server_properties() + + try: + internal_user_credentials = self._create_internal_credentials() + except (KeyError, RuntimeError, subprocess.CalledProcessError, ExecError) as e: + logger.warning(str(e)) + event.defer() + return + + # only set to relation data when all set + for username, password in internal_user_credentials: + self.charm.state.cluster.update({f"{username}-password": password}) + + # attempt re-start of Kafka for all units on zookeeper-changed + # avoids relying on deferred events elsewhere that may not exist after cluster init + if not self.charm.healthy and self.charm.state.cluster.internal_user_credentials: + self.charm._on_start(event) + + self.charm._on_config_changed(event) + + def _on_zookeeper_broken(self, _: RelationEvent) -> None: + """Handler for `zookeeper_relation_broken` event, ensuring charm blocks.""" + self.charm.workload.stop() + + logger.info(f'Broker {self.model.unit.name.split("/")[1]} disconnected') + self.charm._set_status(Status.ZK_NOT_RELATED) + + # Kafka keeps a meta.properties in every log.dir with a unique ClusterID + # this ID is provided by ZK, and removing it on relation-broken allows + # re-joining to another ZK cluster. + for storage in self.charm.model.storages["data"]: + self.charm.workload.exec(f"rm {storage.location}/meta.properties") + + if not self.charm.unit.is_leader(): + return + + # other charm methods assume credentials == ACLs + # necessary to clean-up credentials once ZK relation is lost + for username in self.charm.state.cluster.internal_user_credentials: + self.charm.state.cluster.update({f"{username}-password": ""}) + + def _create_internal_credentials(self) -> list[tuple[str, str]]: + """Creates internal SCRAM users during cluster start. 
+ + Returns: + List of (username, password) for all internal users + + Raises: + RuntimeError if called from non-leader unit + KeyError if attempted to update non-leader unit + subprocess.CalledProcessError if command to ZooKeeper failed + """ + credentials = [ + (username, self.charm.workload.generate_password()) for username in INTERNAL_USERS + ] + for username, password in credentials: + self.charm.auth_manager.add_user(username=username, password=password, zk_auth=True) + + return credentials diff --git a/src/health.py b/src/health.py index e7996659..85f01ff2 100644 --- a/src/health.py +++ b/src/health.py @@ -8,12 +8,11 @@ import logging import subprocess from statistics import mean -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING from ops.framework import Object from literals import JVM_MEM_MAX_GB, JVM_MEM_MIN_GB -from utils import safe_get_file if TYPE_CHECKING: from charm import KafkaCharm @@ -31,61 +30,37 @@ def __init__(self, charm) -> None: @property def _service_pid(self) -> int: """Gets most recent Kafka service pid from the snap logs.""" - return self.charm.snap.get_service_pid() + return self.charm.workload.get_service_pid() def _get_current_memory_maps(self) -> int: """Gets the current number of memory maps for the Kafka process.""" - return int( - subprocess.check_output( - f"cat /proc/{self._service_pid}/maps | wc -l", - shell=True, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ) + return int(self.charm.workload.exec(f"cat /proc/{self._service_pid}/maps | wc -l")) def _get_current_max_files(self) -> int: """Gets the current file descriptor limit for the Kafka process.""" return int( - subprocess.check_output( - rf"cat /proc/{self._service_pid}/limits | grep files | awk '{{print $5}}'", - shell=True, - stderr=subprocess.PIPE, - universal_newlines=True, + self.charm.workload.exec( + rf"cat /proc/{self._service_pid}/limits | grep files | awk '{{print $5}}'" ) ) def _get_max_memory_maps(self) -> int: """Gets the 
current memory map limit for the machine.""" - return int( - subprocess.check_output( - "sysctl -n vm.max_map_count", - shell=True, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ) + return int(self.charm.workload.exec("sysctl -n vm.max_map_count")) def _get_vm_swappiness(self) -> int: """Gets the current vm.swappiness configured for the machine.""" - return int( - subprocess.check_output( - "sysctl -n vm.swappiness", - shell=True, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - ) + return int(self.charm.workload.exec("sysctl -n vm.swappiness")) - def _get_partitions_size(self) -> Tuple[int, int]: + def _get_partitions_size(self) -> tuple[int, int]: """Gets the number of partitions and their average size from the log dirs.""" log_dirs_command = [ "--describe", - f"--bootstrap-server {','.join(self.charm.kafka_config.bootstrap_server)}", - f"--command-config {self.charm.kafka_config.client_properties_filepath}", + f"--bootstrap-server {','.join(self.charm.state.bootstrap_server)}", + f"--command-config {self.charm.workload.paths.client_properties}", ] try: - log_dirs = self.charm.snap.run_bin_command( + log_dirs = self.charm.workload.run_bin_command( bin_keyword="log-dirs", bin_args=log_dirs_command ) except subprocess.CalledProcessError: @@ -135,7 +110,7 @@ def _check_memory_maps(self) -> bool: def _check_file_descriptors(self) -> bool: """Checks that the number of used file descriptors is not approaching threshold.""" - if not self.charm.kafka_config.client_listeners: + if not self.charm.config_manager.client_listeners: return True total_partitions, average_partition_size = self._get_partitions_size() @@ -167,7 +142,7 @@ def _check_vm_swappiness(self) -> bool: def _check_total_memory(self) -> bool: """Checks that the total available memory is sufficient for desired profile.""" - if not (meminfo := safe_get_file(filepath="/proc/meminfo")): + if not (meminfo := self.charm.workload.read(path="/proc/meminfo")): return False total_memory_gb = 
int(meminfo[0].split()[1]) / 1000000 diff --git a/src/literals.py b/src/literals.py index 12928966..e1119490 100644 --- a/src/literals.py +++ b/src/literals.py @@ -2,18 +2,18 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. - """Collection of globals common to the KafkaCharm.""" from dataclasses import dataclass from enum import Enum -from typing import Dict, Literal +from typing import Literal from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, StatusBase, WaitingStatus CHARM_KEY = "kafka" SNAP_NAME = "charmed-kafka" CHARMED_KAFKA_SNAP_REVISION = 30 +CONTAINER = "kafka" PEER = "cluster" ZK = "zookeeper" @@ -28,9 +28,15 @@ METRICS_RULES_DIR = "./src/alert_rules/prometheus" LOGS_RULES_DIR = "./src/alert_rules/loki" +SUBSTRATE = "vm" +USER = "snap_daemon" +GROUP = "root" + AuthMechanism = Literal["SASL_PLAINTEXT", "SASL_SSL", "SSL"] Scope = Literal["INTERNAL", "CLIENT"] DebugLevel = Literal["DEBUG", "INFO", "WARNING", "ERROR"] +Substrate = Literal["vm", "k8s"] +DatabagScope = Literal["unit", "app"] JVM_MEM_MIN_GB = 1 JVM_MEM_MAX_GB = 6 @@ -41,14 +47,23 @@ "vm.dirty_background_ratio": "5", } +PATHS = { + "CONF": f"/var/snap/{SNAP_NAME}/current/etc/kafka", + "LOGS": f"/var/snap/{SNAP_NAME}/common/var/log/kafka", + "DATA": f"/var/snap/{SNAP_NAME}/common/var/lib/kafka", + "BIN": f"/snap/{SNAP_NAME}/current/opt/kafka", +} + @dataclass class Ports: + """Types of ports for a Kafka broker.""" + client: int internal: int -SECURITY_PROTOCOL_PORTS: Dict[AuthMechanism, Ports] = { +SECURITY_PROTOCOL_PORTS: dict[AuthMechanism, Ports] = { "SASL_PLAINTEXT": Ports(9092, 19092), "SASL_SSL": Ports(9093, 19093), "SSL": Ports(9094, 19094), @@ -57,21 +72,25 @@ class Ports: @dataclass class StatusLevel: + """Status object helper.""" + status: StatusBase log_level: DebugLevel class Status(Enum): + """Collection of possible statuses for the charm.""" + ACTIVE = StatusLevel(ActiveStatus(), "DEBUG") NO_PEER_RELATION = 
StatusLevel(MaintenanceStatus("no peer relation yet"), "DEBUG") SNAP_NOT_INSTALLED = StatusLevel(BlockedStatus(f"unable to install {SNAP_NAME} snap"), "ERROR") SNAP_NOT_RUNNING = StatusLevel(BlockedStatus("snap service not running"), "WARNING") - ZK_NOT_RELATED = StatusLevel(BlockedStatus("missing required zookeeper relation"), "ERROR") + ZK_NOT_RELATED = StatusLevel(BlockedStatus("missing required zookeeper relation"), "DEBUG") ZK_NOT_CONNECTED = StatusLevel(BlockedStatus("unit not connected to zookeeper"), "ERROR") ZK_TLS_MISMATCH = StatusLevel( BlockedStatus("tls must be enabled on both kafka and zookeeper"), "ERROR" ) - ZK_NO_DATA = StatusLevel(WaitingStatus("zookeeper credentials not created yet"), "INFO") + ZK_NO_DATA = StatusLevel(WaitingStatus("zookeeper credentials not created yet"), "DEBUG") ADDED_STORAGE = StatusLevel( ActiveStatus("manual partition reassignment may be needed to utilize new storage volumes"), "WARNING", @@ -87,7 +106,7 @@ class Status(Enum): "ERROR", ) NO_BROKER_CREDS = StatusLevel( - WaitingStatus("internal broker credentials not yet added"), "INFO" + WaitingStatus("internal broker credentials not yet added"), "DEBUG" ) NO_CERT = StatusLevel(WaitingStatus("unit waiting for signed certificates"), "INFO") SYSCONF_NOT_OPTIMAL = StatusLevel( diff --git a/src/auth.py b/src/managers/auth.py similarity index 77% rename from src/auth.py rename to src/managers/auth.py index f372cb01..74a700ec 100644 --- a/src/auth.py +++ b/src/managers/auth.py @@ -8,13 +8,11 @@ import re import subprocess from dataclasses import asdict, dataclass -from typing import TYPE_CHECKING, Optional, Set -from literals import REL_NAME -from snap import KafkaSnap +from ops.pebble import ExecError -if TYPE_CHECKING: - from charm import KafkaCharm +from core.cluster import ClusterState +from core.workload import WorkloadBase logger = logging.getLogger(__name__) @@ -29,20 +27,23 @@ class Acl: username: str -class KafkaAuth: +class AuthManager: """Object for updating Kafka 
users and ACLs.""" - def __init__(self, charm): - self.charm: "KafkaCharm" = charm - self.zookeeper_connect = self.charm.kafka_config.zookeeper_config.get("connect", "") - self.bootstrap_server = ",".join(self.charm.kafka_config.bootstrap_server) - self.client_properties = self.charm.kafka_config.client_properties_filepath - self.server_properties = self.charm.kafka_config.server_properties_filepath + def __init__(self, state: ClusterState, workload: WorkloadBase, kafka_opts: str): + self.state = state + self.workload = workload + self.kafka_opts = kafka_opts - self.new_user_acls: Set[Acl] = set() + self.zookeeper_connect = self.state.zookeeper.connect + self.bootstrap_server = ",".join(self.state.bootstrap_server) + self.client_properties = self.workload.paths.client_properties + self.server_properties = self.workload.paths.server_properties + + self.new_user_acls: set[Acl] = set() @property - def current_acls(self) -> Set[Acl]: + def current_acls(self) -> set[Acl]: """Sets the current cluster ACLs.""" acls = self._get_acls_from_cluster() return self._parse_acls(acls=acls) @@ -54,12 +55,12 @@ def _get_acls_from_cluster(self) -> str: f"--command-config={self.client_properties}", "--list", ] - acls = KafkaSnap.run_bin_command(bin_keyword="acls", bin_args=command) + acls = self.workload.run_bin_command(bin_keyword="acls", bin_args=command) return acls @staticmethod - def _parse_acls(acls: str) -> Set[Acl]: + def _parse_acls(acls: str) -> set[Acl]: """Parses output from raw ACLs provided by the cluster.""" current_acls = set() resource_type, name, user, operation = None, None, None, None @@ -95,7 +96,7 @@ def _parse_acls(acls: str) -> Set[Acl]: return current_acls @staticmethod - def _generate_producer_acls(topic: str, username: str, **_) -> Set[Acl]: + def _generate_producer_acls(topic: str, username: str, **_) -> set[Acl]: """Generates expected set of `Acl`s for a producer client application.""" producer_acls = set() for operation in ["CREATE", "WRITE", "DESCRIBE"]: 
@@ -111,9 +112,7 @@ def _generate_producer_acls(topic: str, username: str, **_) -> Set[Acl]: return producer_acls @staticmethod - def _generate_consumer_acls( - topic: str, username: str, group: Optional[str] = None - ) -> Set[Acl]: + def _generate_consumer_acls(topic: str, username: str, group: str | None = None) -> set[Acl]: """Generates expected set of `Acl`s for a consumer client application.""" group = group or f"{username}-" # not needed, just for safety @@ -148,7 +147,7 @@ def add_user(self, username: str, password: str, zk_auth: bool = False) -> None: For use before cluster start Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ base_command = [ "--alter", @@ -164,7 +163,7 @@ def add_user(self, username: str, password: str, zk_auth: bool = False) -> None: f"--zookeeper={self.zookeeper_connect}", f"--zk-tls-config-file={self.server_properties}", ] - opts = [self.charm.kafka_config.kafka_opts] + opts = [self.kafka_opts] else: command = base_command + [ f"--bootstrap-server={self.bootstrap_server}", @@ -172,7 +171,7 @@ def add_user(self, username: str, password: str, zk_auth: bool = False) -> None: ] opts = [] - KafkaSnap.run_bin_command(bin_keyword="configs", bin_args=command, opts=opts) + self.workload.run_bin_command(bin_keyword="configs", bin_args=command, opts=opts) def delete_user(self, username: str) -> None: """Deletes user credentials from ZooKeeper. 
@@ -181,7 +180,7 @@ def delete_user(self, username: str) -> None: username: the user name to delete Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ command = [ f"--bootstrap-server={self.bootstrap_server}", @@ -192,9 +191,9 @@ def delete_user(self, username: str) -> None: "--delete-config=SCRAM-SHA-512", ] try: - KafkaSnap.run_bin_command(bin_keyword="configs", bin_args=command) - except subprocess.CalledProcessError as e: - if "delete a user credential that does not exist" in e.stderr: + self.workload.run_bin_command(bin_keyword="configs", bin_args=command) + except (subprocess.CalledProcessError, ExecError) as e: + if e.stderr and "delete a user credential that does not exist" in e.stderr: logger.warning(f"User: {username} can't be deleted, it does not exist") return raise @@ -216,7 +215,7 @@ def add_acl( resource_name: the name of the resource to grant ACLs for Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ command = [ f"--bootstrap-server={self.bootstrap_server}", @@ -233,7 +232,7 @@ def add_acl( f"--group={resource_name}", "--resource-pattern-type=PREFIXED", ] - KafkaSnap.run_bin_command(bin_keyword="acls", bin_args=command) + self.workload.run_bin_command(bin_keyword="acls", bin_args=command) def remove_acl( self, username: str, operation: str, resource_type: str, resource_name: str @@ -249,7 +248,7 @@ def remove_acl( resource_name: the name of the resource to remove ACLs for Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ command = [ f"--bootstrap-server={self.bootstrap_server}", @@ -268,7 +267,7 @@ def remove_acl( 
"--resource-pattern-type=PREFIXED", ] - KafkaSnap.run_bin_command(bin_keyword="acls", bin_args=command) + self.workload.run_bin_command(bin_keyword="acls", bin_args=command) def remove_all_user_acls(self, username: str) -> None: """Removes all active ACLs for a given user. @@ -277,7 +276,7 @@ def remove_all_user_acls(self, username: str) -> None: username: the user name to remove ACLs for Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ # getting subset of all cluster ACLs for only the provided user current_user_acls = {acl for acl in self.current_acls if acl.username == username} @@ -286,7 +285,7 @@ def remove_all_user_acls(self, username: str) -> None: self.remove_acl(**asdict(acl)) def update_user_acls( - self, username: str, topic: str, extra_user_roles: str, group: Optional[str], **_ + self, username: str, topic: str, extra_user_roles: str, group: str | None, **_ ) -> None: """Compares data passed from the client relation, and updating cluster ACLs to match. 
@@ -304,7 +303,7 @@ def update_user_acls( group: the consumer group Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code + `(subprocess.CalledProcessError | ops.pebble.ExecError)`: if the error returned a non-zero exit code """ if "producer" in extra_user_roles: self.new_user_acls.update(self._generate_producer_acls(topic=topic, username=username)) @@ -323,20 +322,3 @@ def update_user_acls( acls_to_remove = current_user_acls - self.new_user_acls for acl in acls_to_remove: self.remove_acl(**asdict(acl)) - - def clear_users(self) -> None: - """Check existing relations and remove deleted users.""" - current_usernames = [acl.username for acl in self.current_acls] - relation_usernames = [ - f"relation-{relation.id}" for relation in self.charm.model.relations[REL_NAME] - ] - to_remove = [ - username for username in current_usernames if username not in relation_usernames - ] - - for username in to_remove: - self.remove_all_user_acls(username=username) - self.delete_user(username=username) - # non-leader units need cluster_config_changed event to update their super.users - # update on the peer relation data will trigger an update of server properties on all unit - self.charm.app_peer_data.update({username: ""}) diff --git a/src/config.py b/src/managers/config.py similarity index 55% rename from src/config.py rename to src/managers/config.py index 2246fe47..2944d0cc 100644 --- a/src/config.py +++ b/src/managers/config.py @@ -5,29 +5,21 @@ """Manager for handling Kafka configuration.""" import logging -import os -from typing import TYPE_CHECKING, Dict, List, cast - -from ops.model import Unit +from typing import cast +from core.cluster import ClusterState +from core.structured_config import CharmConfig, LogLevel +from core.workload import WorkloadBase from literals import ( ADMIN_USER, INTER_BROKER_USER, - INTERNAL_USERS, JMX_EXPORTER_PORT, JVM_MEM_MAX_GB, JVM_MEM_MIN_GB, - REL_NAME, SECURITY_PROTOCOL_PORTS, - ZK, AuthMechanism, Scope, ) 
-from structured_config import LogLevel -from utils import map_env, safe_get_file, safe_write_to_file, update_env - -if TYPE_CHECKING: - from charm import KafkaCharm logger = logging.getLogger(__name__) @@ -104,96 +96,20 @@ def advertised_listener(self) -> str: return f"{self.name}://{self.host}:{self.port}" -class KafkaConfig: +class KafkaConfigManager: """Manager for handling Kafka configuration.""" - def __init__(self, charm): - self.charm: "KafkaCharm" = charm - self.server_properties_filepath = f"{self.charm.snap.CONF_PATH}/server.properties" - self.client_properties_filepath = f"{self.charm.snap.CONF_PATH}/client.properties" - self.zk_jaas_filepath = f"{self.charm.snap.CONF_PATH}/zookeeper-jaas.cfg" - self.keystore_filepath = f"{self.charm.snap.CONF_PATH}/keystore.p12" - self.truststore_filepath = f"{self.charm.snap.CONF_PATH}/truststore.jks" - self.jmx_prometheus_javaagent_filepath = ( - f"{self.charm.snap.BINARIES_PATH}/libs/jmx_prometheus_javaagent.jar" - ) - self.jmx_prometheus_config_filepath = f"{self.charm.snap.CONF_PATH}/jmx_prometheus.yaml" - - @property - def internal_user_credentials(self) -> Dict[str, str]: - """The charm internal usernames and passwords, e.g `sync` and `admin`. - - Returns: - Dict of usernames and passwords - """ - credentials = { - user: password - for user in INTERNAL_USERS - if (password := self.charm.get_secret(scope="app", key=f"{user}-password")) - } - - if not len(credentials) == len(INTERNAL_USERS): - return {} - - return credentials - - @property - def zookeeper_config(self) -> Dict[str, str]: - """The config from current ZooKeeper relations for data necessary for broker connection. 
- - Returns: - Dict of ZooKeeeper: - `username`, `password`, `endpoints`, `chroot`, `connect`, `uris` and `tls` - """ - zookeeper_config = {} - # loop through all relations to ZK, attempt to find all needed config - for relation in self.charm.model.relations[ZK]: - if not relation.app: - continue - - zk_keys = ["username", "password", "endpoints", "chroot", "uris", "tls"] - missing_config = any( - relation.data[relation.app].get(key, None) is None for key in zk_keys - ) - - # skip if config is missing - if missing_config: - continue - - # set if exists - zookeeper_config.update(relation.data[relation.app]) - break - - if zookeeper_config: - sorted_uris = sorted( - zookeeper_config["uris"].replace(zookeeper_config["chroot"], "").split(",") - ) - sorted_uris[-1] = sorted_uris[-1] + zookeeper_config["chroot"] - zookeeper_config["connect"] = ",".join(sorted_uris) - - return zookeeper_config - - @property - def zookeeper_related(self) -> bool: - """Checks if there is a relation with ZooKeeper. - - Returns: - True if there is a ZooKeeper relation. Otherwise False - """ - return bool(self.charm.model.relations[ZK]) - - @property - def zookeeper_connected(self) -> bool: - """Checks if there is an active ZooKeeper relation with all necessary data. - - Returns: - True if ZooKeeper is currently related with sufficient relation data - for a broker to connect with. Otherwise False - """ - if self.zookeeper_config.get("connect", None): - return True - - return False + def __init__( + self, + state: ClusterState, + workload: WorkloadBase, + config: CharmConfig, + current_version: str, + ): + self.state = state + self.workload = workload + self.config = config + self.current_version = current_version @property def log_level(self) -> str: @@ -203,9 +119,9 @@ def log_level(self) -> str: String with these possible values: DEBUG, INFO, WARN, ERROR """ # Remapping to WARN that is generally used in Java applications based on log4j and logback. 
- if self.charm.config.log_level == LogLevel.WARNING.value: + if self.config.log_level == LogLevel.WARNING.value: return "WARN" - return self.charm.config.log_level + return self.config.log_level @property def jmx_opts(self) -> str: @@ -216,7 +132,7 @@ def jmx_opts(self) -> str: """ opts = [ "-Dcom.sun.management.jmxremote", - f"-javaagent:{self.jmx_prometheus_javaagent_filepath}={JMX_EXPORTER_PORT}:{self.jmx_prometheus_config_filepath}", + f"-javaagent:{self.workload.paths.jmx_prometheus_javaagent}={JMX_EXPORTER_PORT}:{self.workload.paths.jmx_prometheus_config}", ] return f"KAFKA_JMX_OPTS='{' '.join(opts)}'" @@ -247,9 +163,7 @@ def heap_opts(self) -> str: Returns: String of JVM heap memory options """ - target_memory = ( - JVM_MEM_MIN_GB if self.charm.config.profile == "testing" else JVM_MEM_MAX_GB - ) + target_memory = JVM_MEM_MIN_GB if self.config.profile == "testing" else JVM_MEM_MAX_GB opts = [ f"-Xms{target_memory}G", f"-Xmx{target_memory}G", @@ -265,39 +179,20 @@ def kafka_opts(self) -> str: String of Java config options """ opts = [ - f"-Djava.security.auth.login.config={self.zk_jaas_filepath}", + f"-Djava.security.auth.login.config={self.workload.paths.zk_jaas}", f"-Dcharmed.kafka.log.level={self.log_level}", ] return f"KAFKA_OPTS='{' '.join(opts)}'" @property - def bootstrap_server(self) -> List[str]: - """The current Kafka uris formatted for the `bootstrap-server` command flag. 
- - Returns: - List of `bootstrap-server` servers - """ - if not self.charm.peer_relation: - return [] - - units: List[Unit] = list(set([self.charm.unit] + list(self.charm.peer_relation.units))) - hosts = [self.charm.peer_relation.data[unit].get("private-address") for unit in units] - port = ( - SECURITY_PROTOCOL_PORTS["SASL_SSL"].client - if (self.charm.tls.enabled and self.charm.tls.certificate) - else SECURITY_PROTOCOL_PORTS["SASL_PLAINTEXT"].client - ) - return [f"{host}:{port}" for host in hosts] - - @property - def default_replication_properties(self) -> List[str]: + def default_replication_properties(self) -> list[str]: """Builds replication-related properties based on the expected app size. Returns: List of properties to be set """ - replication_factor = min([3, self.charm.app.planned_units()]) + replication_factor = min([3, self.state.planned_units]) min_isr = max([1, replication_factor - 1]) return [ @@ -310,20 +205,19 @@ def default_replication_properties(self) -> List[str]: ] @property - def auth_properties(self) -> List[str]: + def auth_properties(self) -> list[str]: """Builds properties necessary for inter-broker authorization through ZooKeeper. Returns: List of properties to be set """ - broker_id = self.charm.unit.name.split("/")[1] return [ - f"broker.id={broker_id}", - f'zookeeper.connect={self.zookeeper_config["connect"]}', + f"broker.id={self.state.broker.unit_id}", + f"zookeeper.connect={self.state.zookeeper.connect}", ] @property - def zookeeper_tls_properties(self) -> List[str]: + def zookeeper_tls_properties(self) -> list[str]: """Builds the properties necessary for SSL connections to ZooKeeper. 
Returns: @@ -331,36 +225,36 @@ def zookeeper_tls_properties(self) -> List[str]: """ return [ "zookeeper.ssl.client.enable=true", - f"zookeeper.ssl.truststore.location={self.truststore_filepath}", - f"zookeeper.ssl.truststore.password={self.charm.tls.truststore_password}", + f"zookeeper.ssl.truststore.location={self.workload.paths.truststore}", + f"zookeeper.ssl.truststore.password={self.state.broker.truststore_password}", "zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty", ] @property - def tls_properties(self) -> List[str]: + def tls_properties(self) -> list[str]: """Builds the properties necessary for TLS authentication. Returns: List of properties to be set """ - mtls = "required" if self.charm.tls.mtls_enabled else "none" + mtls = "required" if self.state.cluster.mtls_enabled else "none" return [ - f"ssl.truststore.location={self.truststore_filepath}", - f"ssl.truststore.password={self.charm.tls.truststore_password}", - f"ssl.keystore.location={self.keystore_filepath}", - f"ssl.keystore.password={self.charm.tls.keystore_password}", + f"ssl.truststore.location={self.workload.paths.truststore}", + f"ssl.truststore.password={self.state.broker.truststore_password}", + f"ssl.keystore.location={self.workload.paths.keystore}", + f"ssl.keystore.password={self.state.broker.keystore_password}", f"ssl.client.auth={mtls}", ] @property - def scram_properties(self) -> List[str]: + def scram_properties(self) -> list[str]: """Builds the properties for each scram listener. 
Returns: list of scram properties to be set """ username = INTER_BROKER_USER - password = self.internal_user_credentials.get(INTER_BROKER_USER, "") + password = self.state.cluster.internal_user_credentials.get(INTER_BROKER_USER, "") scram_properties = [ f'listener.name.{self.internal_listener.name.lower()}.scram-sha-512.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="{username}" password="{password}";' @@ -381,81 +275,44 @@ def security_protocol(self) -> AuthMechanism: # FIXME: When we have multiple auth_mechanims/listeners, remove this method return ( "SASL_SSL" - if (self.charm.tls.enabled and self.charm.tls.certificate) + if (self.state.cluster.tls_enabled and self.state.broker.certificate) else "SASL_PLAINTEXT" ) @property - def auth_mechanisms(self) -> List[AuthMechanism]: + def auth_mechanisms(self) -> list[AuthMechanism]: """Return a list of enabled auth mechanisms.""" # TODO: At the moment only one mechanism for extra listeners. Will need to be # extended with more depending on configuration settings. 
protocol = [self.security_protocol] - if self.charm.tls.mtls_enabled: + if self.state.cluster.mtls_enabled: protocol += ["SSL"] - return cast(List[AuthMechanism], protocol) + return cast(list[AuthMechanism], protocol) @property def internal_listener(self) -> Listener: """Return the internal listener.""" protocol = self.security_protocol - return Listener(host=self.charm.unit_host, protocol=protocol, scope="INTERNAL") + return Listener(host=self.state.broker.host, protocol=protocol, scope="INTERNAL") @property - def client_listeners(self) -> List[Listener]: + def client_listeners(self) -> list[Listener]: """Return a list of extra listeners.""" # if there is a relation with kafka then add extra listener - if not self.charm.model.relations.get(REL_NAME, None): + if not self.state.client_relations: return [] return [ - Listener(host=self.charm.unit_host, protocol=auth, scope="CLIENT") + Listener(host=self.state.broker.host, protocol=auth, scope="CLIENT") for auth in self.auth_mechanisms ] @property - def all_listeners(self) -> List[Listener]: + def all_listeners(self) -> list[Listener]: """Return a list with all expected listeners.""" return [self.internal_listener] + self.client_listeners - @property - def super_users(self) -> str: - """Generates all users with super/admin permissions for the cluster from relations. - - Formatting allows passing to the `super.users` property. 
- - Returns: - Semicolon delimited string of current super users - """ - super_users = set(INTERNAL_USERS) - for relation in self.charm.model.relations[REL_NAME]: - if not relation or not relation.app or not self.charm.peer_relation: - continue - - extra_user_roles = relation.data[relation.app].get("extra-user-roles", "") - password = self.charm.peer_relation.data[self.charm.app].get( - f"relation-{relation.id}", None - ) - # if passwords are set for client admins, they're good to load - if "admin" in extra_user_roles and password is not None: - super_users.add(f"relation-{relation.id}") - - super_users_arg = sorted([f"User:{user}" for user in super_users]) - - return ";".join(super_users_arg) - - @property - def log_dirs(self) -> str: - """Builds the necessary log.dirs based on mounted storage volumes. - - Returns: - String of log.dirs property value to be set - """ - return ",".join( - [os.fspath(storage.location) for storage in self.charm.model.storages["data"]] - ) - @property def inter_broker_protocol_version(self) -> str: """Creates the protocol version from the kafka version. @@ -464,22 +321,22 @@ def inter_broker_protocol_version(self) -> str: String with the `major.minor` version """ # Remove patch number from full vervion. - major_minor = self.charm.upgrade.current_version.split(".", maxsplit=2) + major_minor = self.current_version.split(".", maxsplit=2) return ".".join(major_minor[:2]) @property - def rack_properties(self) -> List[str]: + def rack_properties(self) -> list[str]: """Builds all properties related to rack awareness configuration. 
Returns: List of properties to be set """ # TODO: not sure if we should make this an instance attribute like the other paths - rack_path = f"{self.charm.snap.CONF_PATH}/rack.properties" - return safe_get_file(rack_path) or [] + rack_path = f"{self.workload.paths.conf_path}/rack.properties" + return self.workload.read(rack_path) or [] @property - def client_properties(self) -> List[str]: + def client_properties(self) -> list[str]: """Builds all properties necessary for running an admin Kafka client. This includes SASL/SCRAM auth and security mechanisms. @@ -488,23 +345,23 @@ def client_properties(self) -> List[str]: List of properties to be set """ username = ADMIN_USER - password = self.internal_user_credentials.get(ADMIN_USER, "") + password = self.state.cluster.internal_user_credentials.get(ADMIN_USER, "") client_properties = [ f'sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="{username}" password="{password}";', "sasl.mechanism=SCRAM-SHA-512", f"security.protocol={self.security_protocol}", # FIXME: security.protocol will need changing once multiple listener auth schemes - f"bootstrap.servers={','.join(self.bootstrap_server)}", + f"bootstrap.servers={','.join(self.state.bootstrap_server)}", ] - if self.charm.tls.enabled and self.charm.tls.certificate: + if self.state.cluster.tls_enabled and self.state.broker.certificate: client_properties += self.tls_properties return client_properties @property - def server_properties(self) -> List[str]: + def server_properties(self) -> list[str]: """Builds all properties necessary for starting Kafka service. This includes charm config, replication, SASL/SCRAM auth and default properties. 
@@ -521,8 +378,8 @@ def server_properties(self) -> List[str]: properties = ( [ - f"super.users={self.super_users}", - f"log.dirs={self.log_dirs}", + f"super.users={self.state.super_users}", + f"log.dirs={self.state.log_dirs}", f"listener.security.protocol.map={','.join(protocol_map)}", f"listeners={','.join(listeners_repr)}", f"advertised.listeners={','.join(advertised_listeners)}", @@ -537,54 +394,50 @@ def server_properties(self) -> List[str]: + DEFAULT_CONFIG_OPTIONS.split("\n") ) - if self.charm.tls.enabled and self.charm.tls.certificate: + if self.state.cluster.tls_enabled and self.state.broker.certificate: properties += self.tls_properties + self.zookeeper_tls_properties return properties - @staticmethod - def _translate_config_key(key): - """Format config names into server properties, blacklisted property are commented out. - - Returns: - String with Kafka configuration name to be placed in the server.properties file - """ - return key.replace("_", ".") if key not in SERVER_PROPERTIES_BLACKLIST else f"# {key}" - @property - def config_properties(self) -> List[str]: + def config_properties(self) -> list[str]: """Configure server properties from config.""" return [ f"{self._translate_config_key(conf_key)}={str(value)}" - for conf_key, value in self.charm.config.dict().items() + for conf_key, value in self.config.dict().items() if value is not None ] + @property + def zk_jaas_config(self) -> str: + """Builds the JAAS config for Client authentication with ZooKeeper. 
+ + Returns: + String of Jaas config for ZooKeeper auth + """ + return f""" +Client {{ + org.apache.zookeeper.server.auth.DigestLoginModule required + username="{self.state.zookeeper.username}" + password="{self.state.zookeeper.password}"; +}}; + + """ + def set_zk_jaas_config(self) -> None: """Writes the ZooKeeper JAAS config using ZooKeeper relation data.""" - jaas_config = f""" - Client {{ - org.apache.zookeeper.server.auth.DigestLoginModule required - username="{self.zookeeper_config['username']}" - password="{self.zookeeper_config['password']}"; - }}; - """ - safe_write_to_file(content=jaas_config, path=self.zk_jaas_filepath, mode="w") + self.workload.write(content=self.zk_jaas_config, path=self.workload.paths.zk_jaas) def set_server_properties(self) -> None: """Writes all Kafka config properties to the `server.properties` path.""" - safe_write_to_file( - content="\n".join(self.server_properties), - path=self.server_properties_filepath, - mode="w", + self.workload.write( + content="\n".join(self.server_properties), path=self.workload.paths.server_properties ) def set_client_properties(self) -> None: """Writes all client config properties to the `client.properties` path.""" - safe_write_to_file( - content="\n".join(self.client_properties), - path=self.client_properties_filepath, - mode="w", + self.workload.write( + content="\n".join(self.client_properties), path=self.workload.paths.client_properties ) def set_environment(self) -> None: @@ -595,4 +448,29 @@ def set_environment(self) -> None: self.jvm_performance_opts, self.heap_opts, ] - update_env(env=map_env(env=updated_env_list)) + + def map_env(env: list[str]) -> dict[str, str]: + map_env = {} + for var in env: + key = "".join(var.split("=", maxsplit=1)[0]) + value = "".join(var.split("=", maxsplit=1)[1:]) + if key: + # only check for keys, as we can have an empty value for a variable + map_env[key] = value + return map_env + + raw_current_env = self.workload.read("/etc/environment") + current_env = 
map_env(raw_current_env) + + updated_env = current_env | map_env(updated_env_list) + content = "\n".join([f"{key}={value}" for key, value in updated_env.items()]) + self.workload.write(content=content, path="/etc/environment") + + @staticmethod + def _translate_config_key(key: str): + """Format config names into server properties, blacklisted property are commented out. + + Returns: + String with Kafka configuration name to be placed in the server.properties file + """ + return key.replace("_", ".") if key not in SERVER_PROPERTIES_BLACKLIST else f"# {key}" diff --git a/src/managers/tls.py b/src/managers/tls.py new file mode 100644 index 00000000..389fbd63 --- /dev/null +++ b/src/managers/tls.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Manager for handling Kafka TLS configuration.""" + +import logging +import subprocess + +from ops.pebble import ExecError + +from core.cluster import ClusterState +from core.workload import WorkloadBase +from literals import GROUP, USER, Substrate + +logger = logging.getLogger(__name__) + + +class TLSManager: + """Manager for building necessary files for Java TLS auth.""" + + def __init__(self, state: ClusterState, workload: WorkloadBase, substrate: Substrate): + self.state = state + self.workload = workload + self.substrate = substrate + + self.keytool = "charmed-kafka.keytool" if self.substrate == "vm" else "keytool" + + def generate_alias(self, app_name: str, relation_id: int) -> str: + """Generate an alias from a relation. 
Used to identify ca certs.""" + return f"{app_name}-{relation_id}" + + def set_server_key(self) -> None: + """Sets the unit private-key.""" + if not self.state.broker.private_key: + logger.error("Can't set private-key to unit, missing private-key in relation data") + return + + self.workload.write( + content=self.state.broker.private_key, + path=f"{self.workload.paths.conf_path}/server.key", + ) + + def set_ca(self) -> None: + """Sets the unit ca.""" + if not self.state.broker.ca: + logger.error("Can't set CA to unit, missing CA in relation data") + return + + self.workload.write( + content=self.state.broker.ca, path=f"{self.workload.paths.conf_path}/ca.pem" + ) + + def set_certificate(self) -> None: + """Sets the unit certificate.""" + if not self.state.broker.certificate: + logger.error("Can't set certificate to unit, missing certificate in relation data") + return + + self.workload.write( + content=self.state.broker.certificate, + path=f"{self.workload.paths.conf_path}/server.pem", + ) + + def set_truststore(self) -> None: + """Adds CA to JKS truststore.""" + command = f"{self.keytool} -import -v -alias ca -file ca.pem -keystore truststore.jks -storepass {self.state.broker.truststore_password} -noprompt" + try: + self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) + self.workload.exec(f"chown {USER}:{GROUP} {self.workload.paths.truststore}") + self.workload.exec(f"chmod 770 {self.workload.paths.truststore}") + except (subprocess.CalledProcessError, ExecError) as e: + # in case this reruns and fails + if e.stdout and "already exists" in e.stdout: + return + logger.error(e.stdout) + raise e + + def set_keystore(self) -> None: + """Creates and adds unit cert and private-key to the keystore.""" + command = f"openssl pkcs12 -export -in server.pem -inkey server.key -passin pass:{self.state.broker.keystore_password} -certfile server.pem -out keystore.p12 -password pass:{self.state.broker.keystore_password}" + try: + 
self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) + self.workload.exec(f"chown {USER}:{GROUP} {self.workload.paths.keystore}") + self.workload.exec(f"chmod 770 {self.workload.paths.keystore}") + except (subprocess.CalledProcessError, ExecError) as e: + logger.error(e.stdout) + raise e + + def import_cert(self, alias: str, filename: str) -> None: + """Add a certificate to the truststore.""" + command = f"{self.keytool} -import -v -alias {alias} -file {filename} -keystore truststore.jks -storepass {self.state.broker.truststore_password} -noprompt" + try: + self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) + except (subprocess.CalledProcessError, ExecError) as e: + # in case this reruns and fails + if e.stdout and "already exists" in e.stdout: + logger.debug(e.stdout) + return + logger.error(e.stdout) + raise e + + def remove_cert(self, alias: str) -> None: + """Remove a cert from the truststore.""" + try: + command = f"{self.keytool} -delete -v -alias {alias} -keystore truststore.jks -storepass {self.state.broker.truststore_password} -noprompt" + self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) + self.workload.exec(f"rm -f {alias}.pem", working_dir=self.workload.paths.conf_path) + except (subprocess.CalledProcessError, ExecError) as e: + if e.stdout and "does not exist" in e.stdout: + logger.warning(e.stdout) + return + logger.error(e.stdout) + raise e + + def remove_stores(self) -> None: + """Cleans up all keys/certs/stores on a unit.""" + try: + self.workload.exec( + command="rm -rf *.pem *.key *.p12 *.jks", + working_dir=self.workload.paths.conf_path, + ) + except (subprocess.CalledProcessError, ExecError) as e: + logger.error(e.stdout) + raise e diff --git a/src/tls.py b/src/tls.py deleted file mode 100644 index 1e913718..00000000 --- a/src/tls.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details.
- -"""Manager for handling Kafka TLS configuration.""" - -import json -import logging -import socket -import subprocess -from typing import TYPE_CHECKING, Dict, List, Optional - -from charms.tls_certificates_interface.v1.tls_certificates import ( - CertificateAvailableEvent, - TLSCertificatesRequiresV1, - _load_relation_data, - generate_csr, - generate_private_key, -) -from ops.charm import ( - ActionEvent, - RelationBrokenEvent, - RelationChangedEvent, - RelationJoinedEvent, -) -from ops.framework import Object -from ops.model import ActiveStatus, BlockedStatus - -from literals import TLS_RELATION, TRUSTED_CA_RELATION, TRUSTED_CERTIFICATE_RELATION -from utils import ( - generate_password, - parse_tls_file, - safe_write_to_file, - set_snap_mode_bits, - set_snap_ownership, -) - -if TYPE_CHECKING: - from charm import KafkaCharm - -logger = logging.getLogger(__name__) - - -class KafkaTLS(Object): - """Handler for managing the client and unit TLS keys/certs.""" - - def __init__(self, charm): - super().__init__(charm, "tls") - self.charm: "KafkaCharm" = charm - self.certificates = TLSCertificatesRequiresV1(self.charm, TLS_RELATION) - - # Own certificates handlers - self.framework.observe( - self.charm.on[TLS_RELATION].relation_created, self._tls_relation_created - ) - self.framework.observe( - self.charm.on[TLS_RELATION].relation_joined, self._tls_relation_joined - ) - self.framework.observe( - self.charm.on[TLS_RELATION].relation_broken, self._tls_relation_broken - ) - self.framework.observe( - getattr(self.certificates.on, "certificate_available"), self._on_certificate_available - ) - self.framework.observe( - getattr(self.certificates.on, "certificate_expiring"), self._on_certificate_expiring - ) - self.framework.observe( - getattr(self.charm.on, "set_tls_private_key_action"), self._set_tls_private_key - ) - - # External certificates handlers (for mTLS) - for relation in [TRUSTED_CERTIFICATE_RELATION, TRUSTED_CA_RELATION]: - self.framework.observe( - 
self.charm.on[relation].relation_created, - self._trusted_relation_created, - ) - self.framework.observe( - self.charm.on[relation].relation_joined, - self._trusted_relation_joined, - ) - self.framework.observe( - self.charm.on[relation].relation_changed, - self._trusted_relation_changed, - ) - self.framework.observe( - self.charm.on[relation].relation_broken, - self._trusted_relation_broken, - ) - - def _tls_relation_created(self, _) -> None: - """Handler for `certificates_relation_created` event.""" - if not self.charm.unit.is_leader() or not self.charm.peer_relation: - return - - self.charm.app_peer_data.update({"tls": "enabled"}) - - def _tls_relation_joined(self, _) -> None: - """Handler for `certificates_relation_joined` event.""" - # generate unit private key if not already created by action - if not self.private_key: - self.charm.set_secret( - scope="unit", key="private-key", value=generate_private_key().decode("utf-8") - ) - - # generate unit private key if not already created by action - if not self.keystore_password: - self.charm.set_secret(scope="unit", key="keystore-password", value=generate_password()) - if not self.truststore_password: - self.charm.set_secret( - scope="unit", key="truststore-password", value=generate_password() - ) - - self._request_certificate() - - def _tls_relation_broken(self, _) -> None: - """Handler for `certificates_relation_broken` event.""" - self.charm.set_secret(scope="unit", key="csr", value="") - self.charm.set_secret(scope="unit", key="certificate", value="") - self.charm.set_secret(scope="unit", key="ca", value="") - - # remove all existing keystores from the unit so we don't preserve certs - self.remove_stores() - - if not self.charm.unit.is_leader(): - return - - self.charm.app_peer_data.update({"tls": ""}) - - def _trusted_relation_created(self, _) -> None: - """Handle relation created event to trusted tls charm.""" - if not self.charm.unit.is_leader(): - return - - if not self.enabled: - msg = "Own certificates are 
not set. Please relate using 'certificates' relation first" - logger.error(msg) - self.charm.app.status = BlockedStatus(msg) - return - - # Create a "mtls" flag so a new listener (CLIENT_SSL) is created - self.charm.app_peer_data.update({"mtls": "enabled"}) - self.charm.app.status = ActiveStatus() - - def _trusted_relation_joined(self, event: RelationJoinedEvent) -> None: - """Generate a CSR so the tls-certificates operator works as expected.""" - # Once the certificates have been added, TLS setup has finished - if not self.certificate: - logger.debug("Missing TLS relation, deferring") - event.defer() - return - - alias = self.generate_alias( - app_name=event.app.name, # pyright: ignore[reportOptionalMemberAccess] - relation_id=event.relation.id, - ) - csr = ( - generate_csr( - add_unique_id_to_subject_name=bool(alias), - private_key=self.private_key.encode( # pyright: ignore[reportOptionalMemberAccess] - "utf-8" - ), - subject=self.charm.unit_peer_data.get("private-address", ""), - sans_ip=self._sans["sans_ip"], - sans_dns=self._sans["sans_dns"], - ) - .decode() - .strip() - ) - - csr_dict = [{"certificate_signing_request": csr}] - event.relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps(csr_dict) - - def _trusted_relation_changed(self, event: RelationChangedEvent) -> None: - """Overrides the requirer logic of TLSInterface.""" - # Once the certificates have been added, TLS setup has finished - if not self.certificate: - logger.debug("Missing TLS relation, deferring") - event.defer() - return - - relation_data = _load_relation_data(dict(event.relation.data[event.relation.app])) # type: ignore[reportOptionalMemberAccess] - provider_certificates = relation_data.get("certificates", []) - - if not provider_certificates: - logger.warning("No certificates on provider side") - event.defer() - return - - alias = self.generate_alias( - event.relation.app.name, # pyright: ignore[reportOptionalMemberAccess] - event.relation.id, - ) - # NOTE: Relation 
should only be used with one set of certificates, - # hence using just the first item on the list. - content = ( - provider_certificates[0]["certificate"] - if event.relation.name == TRUSTED_CERTIFICATE_RELATION - else provider_certificates[0]["ca"] - ) - filename = f"{alias}.pem" - safe_write_to_file(content=content, path=f"{self.charm.snap.CONF_PATH}/{filename}") - self.import_cert(alias=f"{alias}", filename=filename) - - # ensuring new config gets applied - self.charm.on[f"{self.charm.restart.name}"].acquire_lock.emit() - - def _trusted_relation_broken(self, event: RelationBrokenEvent) -> None: - """Handle relation broken for a trusted certificate/ca relation.""" - # Once the certificates have been added, TLS setup has finished - if not self.certificate: - logger.debug("Missing TLS relation, deferring") - event.defer() - return - - # All units will need to remove the cert from their truststore - alias = self.generate_alias( - app_name=event.relation.app.name, # pyright: ignore[reportOptionalMemberAccess] - relation_id=event.relation.id, - ) - self.remove_cert(alias=alias) - - # The leader will also handle removing the "mtls" flag if needed - if not self.charm.unit.is_leader(): - return - - # Get all relations, and remove the one being broken - all_relations = ( - self.model.relations[TRUSTED_CA_RELATION] - + self.model.relations[TRUSTED_CERTIFICATE_RELATION] - ) - all_relations.remove(event.relation) - logger.debug(f"Remaining relations: {all_relations}") - - # No relations means that there are no certificates left in the truststore - if not all_relations: - self.charm.app_peer_data.update({"mtls": ""}) - - def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: - """Handler for `certificates_available` event after provider updates signed certs.""" - if not self.charm.peer_relation: - logger.warning("No peer relation on certificate available") - event.defer() - return - - # avoid setting tls files and restarting - if
event.certificate_signing_request != self.csr: - logger.error("Can't use certificate, found unknown CSR") - return - - self.charm.set_secret(scope="unit", key="certificate", value=event.certificate) - self.charm.set_secret(scope="unit", key="ca", value=event.ca) - - self.set_server_key() - self.set_ca() - self.set_certificate() - self.set_truststore() - self.set_keystore() - - def _on_certificate_expiring(self, _) -> None: - """Handler for `certificate_expiring` event.""" - if not self.private_key or not self.csr or not self.charm.peer_relation: - logger.error("Missing unit private key and/or old csr") - return - new_csr = generate_csr( - private_key=self.private_key.encode("utf-8"), - subject=self.charm.unit_peer_data.get("private-address", ""), - sans_ip=self._sans["sans_ip"], - sans_dns=self._sans["sans_dns"], - ) - - self.certificates.request_certificate_renewal( - old_certificate_signing_request=self.csr.encode("utf-8"), - new_certificate_signing_request=new_csr, - ) - - self.charm.set_secret(scope="unit", key="csr", value=new_csr.decode("utf-8").strip()) - - def _set_tls_private_key(self, event: ActionEvent) -> None: - """Handler for `set_tls_private_key` action.""" - private_key = ( - parse_tls_file(key) - if (key := event.params.get("internal-key")) - else generate_private_key().decode("utf-8") - ) - - self.charm.set_secret(scope="unit", key="private-key", value=private_key) - - self._on_certificate_expiring(event) - - @property - def enabled(self) -> bool: - """Flag to check if the cluster should run with TLS. - - Returns: - True if TLS encryption should be active. Otherwise False - """ - return self.charm.app_peer_data.get("tls", "disabled") == "enabled" - - @property - def mtls_enabled(self) -> bool: - """Flag to check if the cluster should run with mTLS. - - Returns: - True if TLS encryption should be active. 
Otherwise False - """ - return self.charm.app_peer_data.get("mtls", "disabled") == "enabled" - - @property - def private_key(self) -> Optional[str]: - """The unit private-key set during `certificates_joined`. - - Returns: - String of key contents - None if key not yet generated - """ - return self.charm.get_secret(scope="unit", key="private-key") - - @property - def csr(self) -> Optional[str]: - """The unit cert signing request. - - Returns: - String of csr contents - None if csr not yet generated - """ - return self.charm.get_secret(scope="unit", key="csr") - - @property - def certificate(self) -> Optional[str]: - """The signed unit certificate from the provider relation. - - Returns: - String of cert contents in PEM format - None if cert not yet generated/signed - """ - return self.charm.get_secret(scope="unit", key="certificate") - - @property - def ca(self) -> Optional[str]: - """The ca used to sign unit cert. - - Returns: - String of ca contents in PEM format - None if cert not yet generated/signed - """ - return self.charm.get_secret(scope="unit", key="ca") - - @property - def keystore_password(self) -> Optional[str]: - """The unit keystore password set during `certificates_joined`. - - Returns: - String of password - None if password not yet generated - """ - return self.charm.get_secret(scope="unit", key="keystore-password") - - @property - def truststore_password(self) -> Optional[str]: - """The unit truststore password set during `certificates_joined`. 
- - Returns: - String of password - None if password not yet generated - """ - return self.charm.get_secret(scope="unit", key="truststore-password") - - def _request_certificate(self): - """Generates and submits CSR to provider.""" - if not self.private_key or not self.charm.peer_relation: - logger.error("Can't request certificate, missing private key") - return - - csr = generate_csr( - private_key=self.private_key.encode("utf-8"), - subject=self.charm.unit_peer_data.get("private-address", ""), - sans_ip=self._sans["sans_ip"], - sans_dns=self._sans["sans_dns"], - ) - self.charm.set_secret(scope="unit", key="csr", value=csr.decode("utf-8").strip()) - - self.certificates.request_certificate_creation(certificate_signing_request=csr) - - @property - def _extra_sans(self) -> List[str]: - """Parse the certificate_extra_sans config option.""" - extra_sans = self.charm.config.certificate_extra_sans or "" - parsed_sans = [] - - if extra_sans == "": - return parsed_sans - - for sans in extra_sans.split(","): - parsed_sans.append(sans.replace("{unit}", self.charm.unit.name.split("/")[1])) - - return parsed_sans - - @property - def _sans(self) -> Dict[str, List[str]]: - """Builds a SAN dict of DNS names and IPs for the unit.""" - return { - "sans_ip": [self.charm.unit_host], - "sans_dns": [self.charm.unit.name, socket.getfqdn()] + self._extra_sans, - } - - def generate_alias(self, app_name: str, relation_id: int) -> str: - """Generate an alias from a relation. 
Used to identify ca certs.""" - return f"{app_name}-{relation_id}" - - def set_server_key(self) -> None: - """Sets the unit private-key.""" - if not self.private_key: - logger.error("Can't set private-key to unit, missing private-key in relation data") - return - - safe_write_to_file( - content=self.private_key, path=f"{self.charm.snap.CONF_PATH}/server.key" - ) - - def set_ca(self) -> None: - """Sets the unit ca.""" - if not self.ca: - logger.error("Can't set CA to unit, missing CA in relation data") - return - - safe_write_to_file(content=self.ca, path=f"{self.charm.snap.CONF_PATH}/ca.pem") - - def set_certificate(self) -> None: - """Sets the unit certificate.""" - if not self.certificate: - logger.error("Can't set certificate to unit, missing certificate in relation data") - return - - safe_write_to_file( - content=self.certificate, path=f"{self.charm.snap.CONF_PATH}/server.pem" - ) - - def set_truststore(self) -> None: - """Adds CA to JKS truststore.""" - try: - subprocess.check_output( - f"charmed-kafka.keytool -import -v -alias ca -file ca.pem -keystore truststore.jks -storepass {self.truststore_password} -noprompt", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - set_snap_ownership(path=f"{self.charm.snap.CONF_PATH}/truststore.jks") - set_snap_mode_bits(path=f"{self.charm.snap.CONF_PATH}/truststore.jks") - except subprocess.CalledProcessError as e: - # in case this reruns and fails - if "already exists" in e.output: - return - logger.error(e.output) - raise e - - def set_keystore(self) -> None: - """Creates and adds unit cert and private-key to the keystore.""" - try: - subprocess.check_output( - f"openssl pkcs12 -export -in server.pem -inkey server.key -passin pass:{self.keystore_password} -certfile server.pem -out keystore.p12 -password pass:{self.keystore_password}", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - 
set_snap_ownership(path=f"{self.charm.snap.CONF_PATH}/keystore.p12") - set_snap_mode_bits(path=f"{self.charm.snap.CONF_PATH}/keystore.p12") - except subprocess.CalledProcessError as e: - logger.error(e.output) - raise e - - def import_cert(self, alias: str, filename: str) -> None: - """Add a certificate to the truststore.""" - try: - subprocess.check_output( - f"charmed-kafka.keytool -import -v -alias {alias} -file {filename} -keystore truststore.jks -storepass {self.truststore_password} -noprompt", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - except subprocess.CalledProcessError as e: - # in case this reruns and fails - if "already exists" in e.output: - logger.debug(e.output) - return - logger.error(e.output) - raise e - - def remove_cert(self, alias: str) -> None: - """Remove a cert from the truststore.""" - try: - subprocess.check_output( - f"charmed-kafka.keytool -delete -v -alias {alias} -keystore truststore.jks -storepass {self.truststore_password} -noprompt", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - subprocess.check_output( - f"rm -f {alias}.pem", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - except subprocess.CalledProcessError as e: - if "does not exist" in e.output: - logger.warning(e.output) - return - logger.error(e.output) - raise e - - def remove_stores(self) -> None: - """Cleans up all keys/certs/stores on a unit.""" - try: - subprocess.check_output( - "rm -rf *.pem *.key *.p12 *.jks", - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True, - cwd=self.charm.snap.CONF_PATH, - ) - except subprocess.CalledProcessError as e: - logger.error(e.output) - raise e diff --git a/src/utils.py b/src/utils.py deleted file mode 100644 index 66bfd3dc..00000000 --- a/src/utils.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd.
-# See LICENSE file for licensing details. - -"""Collection of helper methods for checking active connections between ZK and Kafka.""" - -import base64 -import logging -import os -import re -import secrets -import shutil -import string -from typing import Dict, List, Optional, Set - -from charms.zookeeper.v0.client import QuorumLeaderNotFoundError, ZooKeeperManager -from kazoo.exceptions import AuthFailedError, NoNodeError -from ops.model import Unit -from tenacity import retry -from tenacity.retry import retry_if_not_result -from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed - -logger = logging.getLogger(__name__) - - -@retry( - # retry to give ZK time to update its broker zNodes before failing - wait=wait_fixed(6), - stop=stop_after_attempt(10), - retry_error_callback=(lambda state: state.outcome.result()), # type: ignore - retry=retry_if_not_result(lambda result: True if result else False), -) -def broker_active(unit: Unit, zookeeper_config: Dict[str, str]) -> bool: - """Checks ZooKeeper for client connections, checks for specific broker id. - - Args: - unit: the `Unit` to check connection of - zookeeper_config: the relation provided by ZooKeeper - - Returns: - True if broker id is recognised as active by ZooKeeper. Otherwise False. - """ - broker_id = unit.name.split("/")[1] - brokers = get_active_brokers(zookeeper_config=zookeeper_config) - chroot = zookeeper_config.get("chroot", "") - return f"{chroot}/brokers/ids/{broker_id}" in brokers - - -def get_active_brokers(zookeeper_config: Dict[str, str]) -> Set[str]: - """Gets all brokers currently connected to ZooKeeper. 
- - Args: - zookeeper_config: the relation data provided by ZooKeeper - - Returns: - Set of active broker ids - """ - chroot = zookeeper_config.get("chroot", "") - hosts = zookeeper_config.get("endpoints", "").split(",") - username = zookeeper_config.get("username", "") - password = zookeeper_config.get("password", "") - - zk = ZooKeeperManager(hosts=hosts, username=username, password=password) - path = f"{chroot}/brokers/ids/" - - try: - brokers = zk.leader_znodes(path=path) - # auth might not be ready with ZK after relation yet - except (NoNodeError, AuthFailedError, QuorumLeaderNotFoundError) as e: - logger.debug(str(e)) - return set() - - return brokers - - -def get_zookeeper_version(zookeeper_config: Dict[str, str]) -> str: - """Get running zookeeper version. - - Args: - zookeeper_config: the relation provided by ZooKeeper - - Returns: - zookeeper version - """ - hosts = zookeeper_config.get("endpoints", "").split(",") - username = zookeeper_config.get("username", "") - password = zookeeper_config.get("password", "") - - zk = ZooKeeperManager(hosts=hosts, username=username, password=password) - - return zk.get_version() - - -def safe_get_file(filepath: str) -> Optional[List[str]]: - """Load file contents from charm workload. - - Args: - filepath: the filepath to load data from - - Returns: - List of file content lines - None if file does not exist - """ - if not os.path.exists(filepath): - return None - else: - with open(filepath) as f: - content = f.read().split("\n") - - return content - - -def safe_write_to_file(content: str, path: str, mode: str = "w") -> None: - """Ensures destination filepath exists before writing. - - Args: - content: the content to be written to a file - path: the full destination filepath - mode: the write mode. Usually "w" for write, or "a" for append. 
Default "w" - """ - os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, mode) as f: - f.write(content) - - set_snap_ownership(path=path) - - -def set_snap_ownership(path: str) -> None: - """Sets a filepath `snap_daemon` ownership.""" - shutil.chown(path, user="snap_daemon", group="root") - - for root, dirs, files in os.walk(path): - for fp in dirs + files: - shutil.chown(os.path.join(root, fp), user="snap_daemon", group="root") - - -def set_snap_mode_bits(path: str) -> None: - """Sets filepath mode bits.""" - os.chmod(path, 0o770) - - for root, dirs, files in os.walk(path): - for fp in dirs + files: - os.chmod(os.path.join(root, fp), 0o770) - - -def generate_password() -> str: - """Creates randomized string for use as app passwords. - - Returns: - String of 32 randomized letter+digit characters - """ - return "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(32)]) - - -def parse_tls_file(raw_content: str) -> str: - """Parse TLS files from both plain text or base64 format.""" - if re.match(r"(-+(BEGIN|END) [A-Z ]+-+)", raw_content): - return raw_content - return base64.b64decode(raw_content).decode("utf-8") - - -def map_env(env: list[str]) -> dict[str, str]: - """Builds environment map for arbitrary env-var strings. - - Returns: - Dict of env-var and value - """ - map_env = {} - for var in env: - key = "".join(var.split("=", maxsplit=1)[0]) - value = "".join(var.split("=", maxsplit=1)[1:]) - if key: - # only check for keys, as we can have an empty value for a variable - map_env[key] = value - - return map_env - - -def get_env() -> dict[str, str]: - """Builds map of current basic environment for all processes. 
- - Returns: - Dict of env-var and value - """ - raw_env = safe_get_file("/etc/environment") or [] - return map_env(env=raw_env) - - -def update_env(env: dict[str, str]) -> None: - """Updates /etc/environment file.""" - current_env = get_env() - if not env or current_env == env: - return - - updated_env = current_env | env - content = "\n".join([f"{key}={value}" for key, value in updated_env.items() if value]) - safe_write_to_file(content=content, path="/etc/environment", mode="w") diff --git a/src/snap.py b/src/workload.py similarity index 61% rename from src/snap.py rename to src/workload.py index 077dfdd7..7beaf132 100644 --- a/src/snap.py +++ b/src/workload.py @@ -5,8 +5,8 @@ """KafkaSnap class and methods.""" import logging +import os import subprocess -from typing import List from charms.operator_libs_linux.v0 import apt from charms.operator_libs_linux.v1 import snap @@ -14,101 +14,81 @@ from tenacity.retry import retry_if_not_result from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed +from typing_extensions import override -from literals import CHARMED_KAFKA_SNAP_REVISION, SNAP_NAME +from core.workload import WorkloadBase +from literals import CHARMED_KAFKA_SNAP_REVISION, GROUP, SNAP_NAME, USER logger = logging.getLogger(__name__) -class KafkaSnap: +class KafkaWorkload(WorkloadBase): """Wrapper for performing common operations specific to the Kafka Snap.""" + # FIXME: Paths and constants integrated into WorkloadBase? SNAP_NAME = "charmed-kafka" - COMPONENT = "kafka" SNAP_SERVICE = "daemon" LOG_SLOT = "logs" - CONF_PATH = f"/var/snap/{SNAP_NAME}/current/etc/{COMPONENT}" - LOGS_PATH = f"/var/snap/{SNAP_NAME}/common/var/log/{COMPONENT}" - DATA_PATH = f"/var/snap/{SNAP_NAME}/common/var/lib/{COMPONENT}" - BINARIES_PATH = f"/snap/{SNAP_NAME}/current/opt/{COMPONENT}" - def __init__(self) -> None: self.kafka = snap.SnapCache()[SNAP_NAME] - def install(self) -> bool: - """Loads the Kafka snap from LP. 
- - Returns: - True if successfully installed. False otherwise. - """ - try: - apt.update() - apt.add_package(["snapd"]) - cache = snap.SnapCache() - kafka = cache[SNAP_NAME] - - kafka.ensure(snap.SnapState.Present, revision=CHARMED_KAFKA_SNAP_REVISION) - - self.kafka = kafka - self.kafka.connect(plug="removable-media") - - self.kafka.hold() - - return True - except (snap.SnapError, apt.PackageNotFoundError) as e: - logger.error(str(e)) - return False - - def start_snap_service(self) -> bool: - """Starts snap service process. - - Returns: - True if service successfully starts. False otherwise. - """ + @override + def start(self) -> None: try: self.kafka.start(services=[self.SNAP_SERVICE]) - return True except snap.SnapError as e: logger.exception(str(e)) - return False - def stop_snap_service(self) -> bool: - """Stops snap service process. - - Returns: - True if service successfully stops. False otherwise. - """ + @override + def stop(self) -> None: try: self.kafka.stop(services=[self.SNAP_SERVICE]) - return True except snap.SnapError as e: logger.exception(str(e)) - return False - - def restart_snap_service(self) -> bool: - """Restarts snap service process. - Returns: - True if service successfully restarts. False otherwise. - """ + @override + def restart(self) -> None: try: self.kafka.restart(services=[self.SNAP_SERVICE]) - return True except snap.SnapError as e: logger.exception(str(e)) - return False - def disable_enable(self) -> None: - """Disables then enables snap service. 
+ @override + def read(self, path: str) -> list[str]: + if not os.path.exists(path): + return [] + else: + with open(path) as f: + content = f.read().split("\n") - Necessary for snap services to recognise new storage mounts + return content - Raises: - subprocess.CalledProcessError if error occurs - """ - subprocess.run(f"snap disable {self.SNAP_NAME}", shell=True) - subprocess.run(f"snap enable {self.SNAP_NAME}", shell=True) + @override + def write(self, content: str, path: str, mode: str = "w") -> None: + os.makedirs(os.path.dirname(path), exist_ok=True) + with open(path, mode) as f: + f.write(content) + + self.exec(f"chown -R {USER}:{GROUP} {path}") + + @override + def exec( + self, command: str, env: dict[str, str] | None = None, working_dir: str | None = None + ) -> str: + try: + output = subprocess.check_output( + command, + stderr=subprocess.PIPE, + universal_newlines=True, + shell=True, + cwd=working_dir, + ) + logger.debug(f"{output=}") + return output + except subprocess.CalledProcessError as e: + logger.debug(f"cmd failed - cmd={e.cmd}, stdout={e.stdout}, stderr={e.stderr}") + raise e @retry( wait=wait_fixed(1), @@ -116,20 +96,50 @@ def disable_enable(self) -> None: retry_error_callback=lambda state: state.outcome.result(), # type: ignore retry=retry_if_not_result(lambda result: True if result else False), ) + @override def active(self) -> bool: - """Checks if service is active. + try: + return bool(self.kafka.services[self.SNAP_SERVICE]["active"]) + except KeyError: + return False - Returns: - True if service is active. Otherwise False + @override + def run_bin_command(self, bin_keyword: str, bin_args: list[str], opts: list[str] = []) -> str: + opts_str = " ".join(opts) + bin_str = " ".join(bin_args) + command = f"{opts_str} {SNAP_NAME}.{bin_keyword} {bin_str}" + return self.exec(command) - Raises: - KeyError if service does not exist + def install(self) -> bool: + """Loads the Kafka snap from LP. + + Returns: + True if successfully installed. 
False otherwise. """ try: - return bool(self.kafka.services[self.SNAP_SERVICE]["active"]) - except KeyError: + apt.update() + apt.add_package(["snapd"]) + + self.kafka.ensure(snap.SnapState.Present, revision=CHARMED_KAFKA_SNAP_REVISION) + self.kafka.connect(plug="removable-media") + self.kafka.hold() + + return True + except (snap.SnapError, apt.PackageNotFoundError) as e: + logger.error(str(e)) return False + def disable_enable(self) -> None: + """Disables then enables snap service. + + Necessary for snap services to recognise new storage mounts + + Raises: + subprocess.CalledProcessError if error occurs + """ + subprocess.run(f"snap disable {self.SNAP_NAME}", shell=True) + subprocess.run(f"snap enable {self.SNAP_NAME}", shell=True) + def get_service_pid(self) -> int: """Gets pid of a currently active snap service. @@ -142,7 +152,6 @@ def get_service_pid(self) -> int: java_processes = subprocess.check_output( "pidof java", stderr=subprocess.PIPE, universal_newlines=True, shell=True ) - logger.debug(f"Java processes: {java_processes}") for pid in java_processes.split(): @@ -156,32 +165,3 @@ def get_service_pid(self) -> int: return int(pid) raise snap.SnapError(f"Snap {self.SNAP_NAME} pid not found") - - @staticmethod - def run_bin_command(bin_keyword: str, bin_args: List[str], opts: List[str] = []) -> str: - """Runs kafka bin command with desired args. 
- - Args: - bin_keyword: the kafka shell script to run - e.g `configs`, `topics` etc - bin_args: the shell command args - opts: any additional opts args strings - - Returns: - String of kafka bin command output - - Raises: - `subprocess.CalledProcessError`: if the error returned a non-zero exit code - """ - args_string = " ".join(bin_args) - opts_string = " ".join(opts) - command = f"{opts_string} {SNAP_NAME}.{bin_keyword} {args_string}" - try: - output = subprocess.check_output( - command, stderr=subprocess.PIPE, universal_newlines=True, shell=True - ) - logger.debug(f"{output=}") - return output - except subprocess.CalledProcessError as e: - logger.debug(f"cmd failed - cmd={e.cmd}, stdout={e.stdout}, stderr={e.stderr}") - raise e diff --git a/tests/integration/app-charm/src/charm.py b/tests/integration/app-charm/src/charm.py index f7bcd2d2..77c7054e 100755 --- a/tests/integration/app-charm/src/charm.py +++ b/tests/integration/app-charm/src/charm.py @@ -21,7 +21,6 @@ from ops.charm import ActionEvent, CharmBase, RelationEvent from ops.main import main from ops.model import ActiveStatus - from utils import safe_write_to_file logger = logging.getLogger(__name__) diff --git a/tests/integration/app-charm/src/utils.py b/tests/integration/app-charm/src/utils.py index f29568b3..9a935bc1 100644 --- a/tests/integration/app-charm/src/utils.py +++ b/tests/integration/app-charm/src/utils.py @@ -9,12 +9,11 @@ import secrets import shutil import string -from typing import List, Optional logger = logging.getLogger(__name__) -def safe_get_file(filepath: str) -> Optional[List[str]]: +def safe_get_file(filepath: str) -> list[str] | None: """Load file contents from charm workload. Args: diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 999ce3af..fa1356c0 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. 
# See LICENSE file for licensing details. from pathlib import Path diff --git a/tests/integration/ha/continuous_writes.py b/tests/integration/ha/continuous_writes.py index 36f6cfc1..379dcbe9 100644 --- a/tests/integration/ha/continuous_writes.py +++ b/tests/integration/ha/continuous_writes.py @@ -8,7 +8,6 @@ from dataclasses import dataclass from multiprocessing import Event, Process, Queue from types import SimpleNamespace -from typing import List, Optional from charms.kafka.v0.client import KafkaClient from kafka.admin import NewTopic @@ -33,14 +32,14 @@ class ContinuousWritesResult: count: int last_expected_message: int - consumed_messages: Optional[List[ConsumerRecord]] + consumed_messages: list[ConsumerRecord] | None class ContinuousWrites: """Utility class for managing continuous writes.""" TOPIC_NAME = "ha-test-topic" - LAST_WRITTEN_VAL_PATH = "last_written_value" + LAST_WRITTEN_VAL_PATH = "/tmp/last_written_value" def __init__(self, ops_test: OpsTest, app: str): self._ops_test = ops_test @@ -90,7 +89,7 @@ def clear(self) -> None: finally: client.close() - def consumed_messages(self) -> List[ConsumerRecord] | None: + def consumed_messages(self) -> list[ConsumerRecord] | None: """Consume the messages in the topic.""" client = self._client() try: diff --git a/tests/integration/ha/ha_helpers.py b/tests/integration/ha/ha_helpers.py index add9369e..498193a9 100644 --- a/tests/integration/ha/ha_helpers.py +++ b/tests/integration/ha/ha_helpers.py @@ -6,15 +6,17 @@ import subprocess from dataclasses import dataclass from subprocess import PIPE, check_output -from typing import Optional from pytest_operator.plugin import OpsTest from integration.ha.continuous_writes import ContinuousWritesResult -from integration.helpers import APP_NAME, get_address, get_kafka_zk_relation_data -from literals import SECURITY_PROTOCOL_PORTS -from snap import KafkaSnap -from utils import get_active_brokers +from integration.helpers import ( + APP_NAME, + get_active_brokers, + 
get_address, + get_kafka_zk_relation_data, +) +from literals import PATHS, SECURITY_PROTOCOL_PORTS PROCESS = "kafka.Kafka" SERVICE_DEFAULT_PATH = "/etc/systemd/system/snap.charmed-kafka.daemon.service" @@ -38,7 +40,7 @@ class ProcessRunningError(Exception): async def get_topic_description( - ops_test: OpsTest, topic: str, unit_name: Optional[str] = None + ops_test: OpsTest, topic: str, unit_name: str | None = None ) -> TopicDescription: """Get the broker with the topic leader. @@ -56,7 +58,7 @@ async def get_topic_description( unit_name = unit_name or ops_test.model.applications[APP_NAME].units[0].name output = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.topics --bootstrap-server {','.join(bootstrap_servers)} --command-config {KafkaSnap.CONF_PATH}/client.properties --describe --topic {topic}'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.topics --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['CONF']}/client.properties --describe --topic {topic}'", stderr=PIPE, shell=True, universal_newlines=True, @@ -69,7 +71,7 @@ async def get_topic_description( async def get_topic_offsets( - ops_test: OpsTest, topic: str, unit_name: Optional[str] = None + ops_test: OpsTest, topic: str, unit_name: str | None = None ) -> list[str]: """Get the offsets of a topic on a unit. 
@@ -88,7 +90,7 @@ async def get_topic_offsets( # example of topic offset output: 'test-topic:0:10' result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.get-offsets --bootstrap-server {','.join(bootstrap_servers)} --command-config {KafkaSnap.CONF_PATH}/client.properties --topic {topic}'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.get-offsets --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['CONF']}/client.properties --topic {topic}'", stderr=PIPE, shell=True, universal_newlines=True, @@ -219,7 +221,7 @@ def is_up(ops_test: OpsTest, broker_id: int) -> bool: kafka_zk_relation_data = get_kafka_zk_relation_data( unit_name=unit_name, model_full_name=ops_test.model_full_name ) - active_brokers = get_active_brokers(zookeeper_config=kafka_zk_relation_data) + active_brokers = get_active_brokers(config=kafka_zk_relation_data) chroot = kafka_zk_relation_data.get("chroot", "") return f"{chroot}/brokers/ids/{broker_id}" in active_brokers diff --git a/tests/integration/ha/test_ha.py b/tests/integration/ha/test_ha.py index ef4f1628..6ab66747 100644 --- a/tests/integration/ha/test_ha.py +++ b/tests/integration/ha/test_ha.py @@ -36,6 +36,7 @@ RESTART_DELAY = 60 CLIENT_TIMEOUT = 30 REELECTION_TIME = 25 +PRODUCING_MESSAGES = 10 logger = logging.getLogger(__name__) @@ -312,7 +313,7 @@ async def test_full_cluster_crash( restart_delay, ): # Let some time pass for messages to be produced - await asyncio.sleep(10) + await asyncio.sleep(PRODUCING_MESSAGES) logger.info("Killing all brokers...") # kill all units "simultaneously" @@ -345,7 +346,7 @@ async def test_full_cluster_restart( c_writes_runner: ContinuousWrites, ): # Let some time pass for messages to be produced - await asyncio.sleep(10) + await asyncio.sleep(PRODUCING_MESSAGES) logger.info("Restarting all brokers...") # Restart all units "simultaneously" @@ -380,7 +381,7 @@ async def 
test_network_cut_without_ip_change( c_writes_runner: ContinuousWrites, ): # Let some time pass for messages to be produced - await asyncio.sleep(10) + await asyncio.sleep(PRODUCING_MESSAGES) topic_description = await get_topic_description( ops_test=ops_test, topic=ContinuousWrites.TOPIC_NAME @@ -436,7 +437,7 @@ async def test_network_cut( c_writes_runner: ContinuousWrites, ): # Let some time pass for messages to be produced - await asyncio.sleep(10) + await asyncio.sleep(PRODUCING_MESSAGES) topic_description = await get_topic_description( ops_test=ops_test, topic=ContinuousWrites.TOPIC_NAME diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 91087f66..7e031bff 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -12,12 +12,13 @@ import yaml from charms.kafka.v0.client import KafkaClient +from charms.zookeeper.v0.client import QuorumLeaderNotFoundError, ZooKeeperManager from kafka.admin import NewTopic +from kazoo.exceptions import AuthFailedError, NoNodeError from pytest_operator.plugin import OpsTest -from auth import Acl, KafkaAuth -from literals import SECURITY_PROTOCOL_PORTS -from snap import KafkaSnap +from literals import PATHS, SECURITY_PROTOCOL_PORTS +from managers.auth import Acl, AuthManager METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) APP_NAME = METADATA["name"] @@ -37,7 +38,7 @@ def load_acls(model_full_name: str, zookeeper_uri: str) -> Set[Acl]: universal_newlines=True, ) - return KafkaAuth._parse_acls(acls=result) + return AuthManager._parse_acls(acls=result) def load_super_users(model_full_name: str) -> List[str]: @@ -149,6 +150,33 @@ def get_provider_data( return provider_relation_data +def get_active_brokers(config: Dict) -> Set[str]: + """Gets all brokers currently connected to ZooKeeper. 
+ + Args: + config: the relation data provided by ZooKeeper + + Returns: + Set of active broker ids + """ + chroot = config.get("chroot", "") + hosts = config.get("endpoints", "").split(",") + username = config.get("username", "") + password = config.get("password", "") + + zk = ZooKeeperManager(hosts=hosts, username=username, password=password) + path = f"{chroot}/brokers/ids/" + + try: + brokers = zk.leader_znodes(path=path) + # auth might not be ready with ZK after relation yet + except (NoNodeError, AuthFailedError, QuorumLeaderNotFoundError) as e: + logger.debug(str(e)) + return set() + + return brokers + + async def get_address(ops_test: OpsTest, app_name=APP_NAME, unit_num=0) -> str: """Get the address for a unit.""" status = await ops_test.model.get_status() # noqa: F821 @@ -313,7 +341,7 @@ def check_logs(model_full_name: str, kafka_unit_name: str, topic: str) -> None: topic: the desired topic to check """ logs = check_output( - f"JUJU_MODEL={model_full_name} juju ssh {kafka_unit_name} sudo -i 'find {KafkaSnap.DATA_PATH}/data'", + f"JUJU_MODEL={model_full_name} juju ssh {kafka_unit_name} sudo -i 'find {PATHS['DATA']}/data'", stderr=PIPE, shell=True, universal_newlines=True, @@ -337,7 +365,7 @@ async def run_client_properties(ops_test: OpsTest) -> str: + f":{SECURITY_PROTOCOL_PORTS['SASL_PLAINTEXT'].client}" ) result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'charmed-kafka.configs --bootstrap-server {bootstrap_server} --describe --all --command-config {KafkaSnap.CONF_PATH}/client.properties --entity-type users'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'charmed-kafka.configs --bootstrap-server {bootstrap_server} --describe --all --command-config {PATHS['CONF']}/client.properties --entity-type users'", stderr=PIPE, shell=True, universal_newlines=True, @@ -349,7 +377,7 @@ async def run_client_properties(ops_test: OpsTest) -> str: async def set_mtls_client_acls(ops_test: OpsTest, 
bootstrap_server: str) -> str: """Adds ACLs for principal `User:client` and `TEST-TOPIC`.""" result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'sudo charmed-kafka.acls --bootstrap-server {bootstrap_server} --add --allow-principal=User:client --operation READ --operation WRITE --operation CREATE --topic TEST-TOPIC --command-config {KafkaSnap.CONF_PATH}/client.properties'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'sudo charmed-kafka.acls --bootstrap-server {bootstrap_server} --add --allow-principal=User:client --operation READ --operation WRITE --operation CREATE --topic TEST-TOPIC --command-config {PATHS['CONF']}/client.properties'", stderr=PIPE, shell=True, universal_newlines=True, diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index e9a56a47..4cb520c8 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -6,7 +6,6 @@ import logging import time from subprocess import PIPE, check_output -from typing import Dict import pytest import requests @@ -103,6 +102,22 @@ async def test_build_and_deploy(ops_test: OpsTest, kafka_charm): assert ops_test.model.applications[ZK_NAME].status == "active" +@pytest.mark.abort_on_fail +async def test_remove_zk_relation_relate(ops_test: OpsTest): + remove_relation_cmd = f"remove-relation {APP_NAME} {ZK_NAME}" + await ops_test.juju(*remove_relation_cmd.split(), check=True) + await ops_test.model.wait_for_idle(apps=[APP_NAME, ZK_NAME], idle_period=40, timeout=3600) + + assert ops_test.model.applications[APP_NAME].status == "blocked" + assert ops_test.model.applications[ZK_NAME].status == "active" + + await ops_test.model.add_relation(APP_NAME, ZK_NAME) + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle( + apps=[APP_NAME, ZK_NAME], status="active", idle_period=30, timeout=1000 + ) + + @pytest.mark.abort_on_fail async def test_listeners(ops_test: OpsTest, app_charm): address = await 
get_address(ops_test=ops_test) @@ -268,7 +283,7 @@ async def test_observability_integration(ops_test: OpsTest): agent_units = ops_test.model.applications["agent"].units # Get all the "targets" from all grafana-agent units - machine_targets: Dict[str, str] = { + machine_targets: dict[str, str] = { unit.machine.id: await unit.machine.ssh( "curl localhost:12345/agent/api/v1/metrics/targets" ) diff --git a/tests/integration/test_provider.py b/tests/integration/test_provider.py index 2ed2357a..88390695 100644 --- a/tests/integration/test_provider.py +++ b/tests/integration/test_provider.py @@ -4,7 +4,6 @@ import asyncio import logging -from typing import Set import pytest from pytest_operator.plugin import OpsTest @@ -33,7 +32,7 @@ @pytest.mark.abort_on_fail async def test_deploy_charms_relate_active( - ops_test: OpsTest, kafka_charm, app_charm, usernames: Set[str] + ops_test: OpsTest, kafka_charm, app_charm, usernames: set[str] ): """Test deploy and relate operations.""" await asyncio.gather( @@ -82,7 +81,7 @@ async def test_deploy_charms_relate_active( @pytest.mark.abort_on_fail async def test_deploy_multiple_charms_same_topic_relate_active( - ops_test: OpsTest, app_charm, usernames: Set[str] + ops_test: OpsTest, app_charm, usernames: set[str] ): """Test relation with multiple applications.""" await ops_test.model.deploy(app_charm, application_name=DUMMY_NAME_2, num_units=1) @@ -114,7 +113,7 @@ async def test_deploy_multiple_charms_same_topic_relate_active( @pytest.mark.abort_on_fail -async def test_remove_application_removes_user_and_acls(ops_test: OpsTest, usernames: Set[str]): +async def test_remove_application_removes_user_and_acls(ops_test: OpsTest, usernames: set[str]): """Test the correct removal of user and permission after relation removal.""" await ops_test.model.remove_application(DUMMY_NAME_1, block_until_done=True) await ops_test.model.wait_for_idle(apps=[APP_NAME]) @@ -143,7 +142,7 @@ async def test_remove_application_removes_user_and_acls(ops_test: 
OpsTest, usern @pytest.mark.abort_on_fail -async def test_deploy_producer_same_topic(ops_test: OpsTest, app_charm, usernames: Set[str]): +async def test_deploy_producer_same_topic(ops_test: OpsTest, app_charm, usernames: set[str]): """Test the correct deployment and relation with role producer.""" await asyncio.gather( ops_test.model.deploy( diff --git a/tests/integration/test_scaling.py b/tests/integration/test_scaling.py index a150e333..4af643ba 100644 --- a/tests/integration/test_scaling.py +++ b/tests/integration/test_scaling.py @@ -10,9 +10,8 @@ from pytest_operator.plugin import OpsTest from literals import CHARM_KEY, ZK -from utils import get_active_brokers -from .helpers import get_kafka_zk_relation_data +from .helpers import get_active_brokers, get_kafka_zk_relation_data logger = logging.getLogger(__name__) @@ -41,7 +40,7 @@ async def test_kafka_simple_scale_up(ops_test: OpsTest, kafka_charm): kafka_zk_relation_data = get_kafka_zk_relation_data( unit_name="kafka/2", model_full_name=ops_test.model_full_name ) - active_brokers = get_active_brokers(zookeeper_config=kafka_zk_relation_data) + active_brokers = get_active_brokers(config=kafka_zk_relation_data) chroot = kafka_zk_relation_data.get("chroot", "") assert f"{chroot}/brokers/ids/0" in active_brokers assert f"{chroot}/brokers/ids/1" in active_brokers @@ -60,7 +59,7 @@ async def test_kafka_simple_scale_down(ops_test: OpsTest): kafka_zk_relation_data = get_kafka_zk_relation_data( unit_name="kafka/2", model_full_name=ops_test.model_full_name ) - active_brokers = get_active_brokers(zookeeper_config=kafka_zk_relation_data) + active_brokers = get_active_brokers(config=kafka_zk_relation_data) chroot = kafka_zk_relation_data.get("chroot", "") assert f"{chroot}/brokers/ids/0" in active_brokers assert f"{chroot}/brokers/ids/1" not in active_brokers diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index db86de17..8e7f1402 100644 --- a/tests/integration/test_tls.py +++ 
b/tests/integration/test_tls.py @@ -18,13 +18,13 @@ TRUSTED_CERTIFICATE_RELATION, ZK, ) -from utils import get_active_brokers from .helpers import ( REL_NAME_ADMIN, check_tls, extract_ca, extract_private_key, + get_active_brokers, get_address, get_kafka_zk_relation_data, set_mtls_client_acls, @@ -87,6 +87,8 @@ async def test_kafka_tls(ops_test: OpsTest, app_charm): await ops_test.model.wait_for_idle( apps=[ZK], idle_period=15, timeout=1000, status="active" ) + + # Unit is on 'blocked' but whole app is on 'waiting' assert ops_test.model.applications[CHARM_KEY].status == "blocked" # Set a custom private key, by running set-tls-private-key action with no parameters, @@ -100,8 +102,8 @@ async def test_kafka_tls(ops_test: OpsTest, app_charm): ) async with ops_test.fast_forward(): - await ops_test.model.add_relation(f"{CHARM_KEY}:{TLS_RELATION}", TLS_NAME) logger.info("Relate Kafka to TLS") + await ops_test.model.add_relation(f"{CHARM_KEY}:{TLS_RELATION}", TLS_NAME) await ops_test.model.wait_for_idle( apps=[CHARM_KEY, ZK, TLS_NAME], idle_period=30, timeout=1200, status="active" ) @@ -110,6 +112,7 @@ async def test_kafka_tls(ops_test: OpsTest, app_charm): assert ops_test.model.applications[ZK].status == "active" kafka_address = await get_address(ops_test=ops_test, app_name=CHARM_KEY) + logger.info("Check for Kafka TLS") assert not check_tls(ip=kafka_address, port=SECURITY_PROTOCOL_PORTS["SASL_SSL"].client) async with ops_test.fast_forward(): @@ -237,7 +240,7 @@ async def test_kafka_tls_scaling(ops_test: OpsTest): kafka_zk_relation_data = get_kafka_zk_relation_data( unit_name=f"{CHARM_KEY}/2", model_full_name=ops_test.model_full_name ) - active_brokers = get_active_brokers(zookeeper_config=kafka_zk_relation_data) + active_brokers = get_active_brokers(config=kafka_zk_relation_data) chroot = kafka_zk_relation_data.get("chroot", "") assert f"{chroot}/brokers/ids/0" in active_brokers assert f"{chroot}/brokers/ids/1" in active_brokers diff --git a/tests/unit/conftest.py 
b/tests/unit/conftest.py index 5f406591..63038d8e 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +from unittest.mock import patch import pytest - -from literals import INTERNAL_USERS +from src.literals import INTERNAL_USERS, SUBSTRATE @pytest.fixture(scope="module") @@ -22,3 +22,46 @@ def zk_data() -> dict[str, str]: @pytest.fixture(scope="module") def passwords_data() -> dict[str, str]: return {f"{user}-password": "mellon" for user in INTERNAL_USERS} + + +@pytest.fixture(autouse=True) +def patched_pebble_restart(mocker): + mocker.patch("ops.model.Container.restart") + + +@pytest.fixture(autouse=True) +def patched_etc_environment(): + with patch("managers.config.KafkaConfigManager.set_environment") as etc_env: + yield etc_env + + +@pytest.fixture(autouse=True) +def patched_workload_write(): + with patch("workload.KafkaWorkload.write") as workload_write: + yield workload_write + + +@pytest.fixture(autouse=True) +def patched_sysctl_config(): + if SUBSTRATE == "vm": + with patch("charm.sysctl.Config.configure") as sysctl_config: + yield sysctl_config + else: + yield + + +@pytest.fixture(autouse=True) +def patched_exec(): + with patch("workload.KafkaWorkload.exec") as patched_exec: + yield patched_exec + + +@pytest.fixture() +def patched_health_machine_configured(): + if SUBSTRATE == "vm": + with patch( + "health.KafkaHealth.machine_configured", return_value=True + ) as machine_configured: + yield machine_configured + else: + yield diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 6a5210bf..980e00b2 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import logging @@ -10,9 +10,9 @@ import yaml from ops.testing import Harness -from auth import Acl, KafkaAuth from charm import KafkaCharm -from literals import CHARM_KEY +from literals import CHARM_KEY, CONTAINER, SUBSTRATE +from managers.auth import Acl, AuthManager logger = logging.getLogger(__name__) @@ -24,6 +24,10 @@ @pytest.fixture def harness(): harness = Harness(KafkaCharm, meta=METADATA) + + if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -56,7 +60,7 @@ def test_parse_acls(): (principal=User:relation-81, host=*, operation=READ, permissionType=ALLOW) """ - parsed_acls = KafkaAuth._parse_acls(acls=acls) + parsed_acls = AuthManager._parse_acls(acls=acls) assert len(parsed_acls) == 5 assert type(list(parsed_acls)[0]) == Acl @@ -64,7 +68,7 @@ def test_parse_acls(): def test_generate_producer_acls(): """Checks correct resourceType for producer ACLs.""" - generated_acls = KafkaAuth._generate_producer_acls(topic="theonering", username="frodo") + generated_acls = AuthManager._generate_producer_acls(topic="theonering", username="frodo") assert len(generated_acls) == 3 operations = set() @@ -79,7 +83,7 @@ def test_generate_producer_acls(): def test_generate_consumer_acls(): """Checks correct resourceType for consumer ACLs.""" - generated_acls = KafkaAuth._generate_consumer_acls(topic="theonering", username="frodo") + generated_acls = AuthManager._generate_consumer_acls(topic="theonering", username="frodo") assert len(generated_acls) == 3 operations = set() @@ -97,17 +101,12 @@ def test_generate_consumer_acls(): def test_add_user_adds_zk_tls_flag(harness): """Checks zk-tls-config-file flag is called for configs bin command.""" - with patch("subprocess.check_output") as patched_check_output: - auth = KafkaAuth(harness.charm) - auth.add_user("samwise", "gamgee", zk_auth=True) - - found_tls = False - found_zk = False - for arg in patched_check_output.call_args.args: - if 
"--zk-tls-config-file" in arg: - found_tls = True - if "--zookeeper" in arg: - found_zk = True - - assert found_tls, "--zk-tls-config-file flag not found" - assert found_zk, "--zookeeper flag not found" + with patch("workload.KafkaWorkload.run_bin_command") as patched_exec: + harness.charm.auth_manager.add_user("samwise", "gamgee", zk_auth=True) + args = patched_exec.call_args_list[0][1] + + assert ( + f"--zk-tls-config-file={harness.charm.workload.paths.server_properties}" + in args["bin_args"] + ), "--zk-tls-config-file flag not found" + assert "--zookeeper=" in args["bin_args"], "--zookeeper flag not found" diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index f3678623..38a9ecd2 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1,21 +1,35 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import logging +import re from pathlib import Path from unittest.mock import PropertyMock, patch import pytest import yaml -from charms.operator_libs_linux.v0.sysctl import ApplyError -from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, WaitingStatus +from ops.model import BlockedStatus from ops.testing import Harness -from tenacity.wait import wait_none -import snap from charm import KafkaCharm -from literals import CHARM_KEY, INTERNAL_USERS, OS_REQUIREMENTS, PEER, REL_NAME, ZK +from literals import ( + CHARM_KEY, + CONTAINER, + INTERNAL_USERS, + JMX_EXPORTER_PORT, + PEER, + REL_NAME, + SUBSTRATE, + ZK, + Status, +) + +if SUBSTRATE == "vm": + from charms.operator_libs_linux.v0.sysctl import ApplyError + from charms.operator_libs_linux.v1.snap import SnapError + + from literals import OS_REQUIREMENTS logger = logging.getLogger(__name__) @@ -25,8 +39,12 @@ @pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA) +def harness() -> Harness: + harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) + 
+ if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -36,46 +54,76 @@ def harness(): ) harness.begin() storage_metadata = getattr(harness.charm, "meta").storages["data"] - min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 0 + min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1 with harness.hooks_disabled(): harness.add_storage(storage_name="data", count=min_storages, attach=True) - return harness -def test_ready_to_start_maintenance_no_peer_relation(harness): - assert not harness.charm.ready_to_start - assert isinstance(harness.charm.unit.status, MaintenanceStatus) +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_install_blocks_snap_install_failure(harness: Harness): + """Checks unit goes to BlockedStatus after snap failure on install hook.""" + with patch("workload.KafkaWorkload.install", return_value=False): + harness.charm.on.install.emit() + assert harness.charm.unit.status == Status.SNAP_NOT_INSTALLED.value.status + + +def test_install_sets_env_vars(harness: Harness, patched_etc_environment): + """Checks KAFKA_OPTS and other vars are written to /etc/environment on install hook.""" + with patch("workload.KafkaWorkload.install"): + harness.charm.on.install.emit() + patched_etc_environment.assert_called_once() + + +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_install_configures_os(harness: Harness, patched_sysctl_config): + with patch("workload.KafkaWorkload.install"): + harness.charm.on.install.emit() + patched_sysctl_config.assert_called_once_with(OS_REQUIREMENTS) + + +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_install_sets_status_if_os_config_fails(harness: Harness, patched_sysctl_config): + with patch("workload.KafkaWorkload.install"): + 
patched_sysctl_config.side_effect = ApplyError("Error setting values") + harness.charm.on.install.emit() + + assert harness.charm.unit.status == Status.SYSCONF_NOT_POSSIBLE.value.status + +def test_ready_to_start_maintenance_no_peer_relation(harness: Harness): + harness.charm.on.start.emit() + assert harness.charm.unit.status == Status.NO_PEER_RELATION.value.status -def test_ready_to_start_blocks_no_zookeeper_relation(harness): + +def test_ready_to_start_blocks_no_zookeeper_relation(harness: Harness): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) - assert not harness.charm.ready_to_start - assert isinstance(harness.charm.unit.status, BlockedStatus) + harness.charm.on.start.emit() + assert harness.charm.unit.status == Status.ZK_NOT_RELATED.value.status -def test_ready_to_start_waits_no_zookeeper_data(harness): +def test_ready_to_start_waits_no_zookeeper_data(harness: Harness): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) harness.add_relation(ZK, ZK) - assert not harness.charm.ready_to_start - assert isinstance(harness.charm.unit.status, WaitingStatus) + harness.charm.on.start.emit() + assert harness.charm.unit.status == Status.ZK_NO_DATA.value.status -def test_ready_to_start_waits_no_user_credentials(harness, zk_data): +def test_ready_to_start_waits_no_user_credentials(harness: Harness, zk_data): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) harness.update_relation_data(zk_rel_id, ZK, zk_data) - assert not harness.charm.ready_to_start - assert isinstance(harness.charm.unit.status, WaitingStatus) + harness.charm.on.start.emit() + assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status -def test_ready_to_start_blocks_mismatch_tls(harness, zk_data, passwords_data): +def test_ready_to_start_blocks_mismatch_tls(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id 
= harness.add_relation(ZK, ZK) @@ -83,21 +131,21 @@ def test_ready_to_start_blocks_mismatch_tls(harness, zk_data, passwords_data): harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, {"tls": "enabled"}) - assert not harness.charm.ready_to_start - assert isinstance(harness.charm.unit.status, BlockedStatus) + harness.charm.on.start.emit() + assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status -def test_ready_to_start_succeeds(harness, zk_data, passwords_data): +def test_ready_to_start_succeeds(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) harness.update_relation_data(zk_rel_id, ZK, zk_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) - assert harness.charm.ready_to_start + assert harness.charm.state.ready_to_start.value.status == Status.ACTIVE.value.status -def test_healthy_fails_if_not_ready_to_start(harness, zk_data, passwords_data): +def test_healthy_fails_if_not_ready_to_start(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -106,51 +154,170 @@ def test_healthy_fails_if_not_ready_to_start(harness, zk_data, passwords_data): harness.update_relation_data(peer_rel_id, CHARM_KEY, {"tls": "enabled"}) assert not harness.charm.healthy - assert isinstance(harness.charm.unit.status, BlockedStatus) -def test_healthy_fails_if_snap_not_active(harness, zk_data, passwords_data): +def test_healthy_fails_if_snap_not_active(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) harness.update_relation_data(zk_rel_id, ZK, zk_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) - with 
patch("snap.KafkaSnap.active", return_value=False) as patched_snap_active: + with patch("workload.KafkaWorkload.active", return_value=False) as patched_snap_active: assert not harness.charm.healthy assert patched_snap_active.call_count == 1 - assert isinstance(harness.charm.unit.status, BlockedStatus) + if SUBSTRATE == "vm": + assert harness.charm.unit.status == Status.SNAP_NOT_RUNNING.value.status + elif SUBSTRATE == "k8s": + assert harness.charm.unit.status == Status.SERVICE_NOT_RUNNING.value.status -def test_healthy_does_not_ping_zk_if_snap_not_active(harness, zk_data, passwords_data): +def test_healthy_succeeds(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) harness.update_relation_data(zk_rel_id, ZK, zk_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + with patch("workload.KafkaWorkload.active", return_value=True): + assert harness.charm.healthy + + +def test_start_defers_without_zookeeper(harness: Harness): + """Checks event deferred and not lost without ZK relation on start hook.""" + with patch("ops.framework.EventBase.defer") as patched_defer: + harness.charm.on.start.emit() + + patched_defer.assert_called_once() + + +def test_start_sets_necessary_config(harness: Harness, zk_data, passwords_data): + """Checks event writes all needed config to unit on start hook.""" + with harness.hooks_disabled(): + peer_rel_id = harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.set_leader(True) + harness.add_relation_unit(zk_rel_id, "zookeeper/0") + harness.update_relation_data(zk_rel_id, ZK, zk_data) + harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + with ( - patch("snap.KafkaSnap.active", return_value=False), - patch("charm.broker_active", return_value=False) as patched_broker_active, + patch("managers.auth.AuthManager.add_user"), + 
patch("managers.config.KafkaConfigManager.set_zk_jaas_config") as patched_jaas, + patch( + "managers.config.KafkaConfigManager.set_server_properties" + ) as patched_server_properties, + patch( + "managers.config.KafkaConfigManager.set_client_properties" + ) as patched_client_properties, + patch("workload.KafkaWorkload.start"), + # NOTE: Patching `active` cuts the hook short, as we are only testing properties being set. + patch("workload.KafkaWorkload.active", return_value=False), ): - assert patched_broker_active.call_count == 0 + harness.charm.on.start.emit() + patched_jaas.assert_called_once() + patched_server_properties.assert_called_once() + patched_client_properties.assert_called_once() -def test_healthy_succeeds(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "vm", reason="pebble layer not used on vm") +def test_start_sets_pebble_layer(harness: Harness, zk_data, passwords_data): + """Checks layer is the expected at start.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) + harness.set_leader(True) + harness.add_relation_unit(zk_rel_id, "zookeeper/0") harness.update_relation_data(zk_rel_id, ZK, zk_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=True), - patch("charm.broker_active", return_value=True), + patch("managers.auth.AuthManager.add_user"), + patch("managers.config.KafkaConfigManager.set_zk_jaas_config"), + patch("managers.config.KafkaConfigManager.set_server_properties"), + patch("managers.config.KafkaConfigManager.set_client_properties"), + # NOTE: Patching `active` cuts the hook short, as we are only testing layer being set. 
+ patch("workload.KafkaWorkload.active", return_value=False), ): - assert harness.charm.healthy + harness.charm.on.start.emit() + found_plan = harness.get_container_pebble_plan("kafka").to_dict() + extra_opts = [ + f"-javaagent:{harness.charm.workload.paths.jmx_prometheus_javaagent}={JMX_EXPORTER_PORT}:{harness.charm.workload.paths.jmx_prometheus_config}", + f"-Djava.security.auth.login.config={harness.charm.workload.paths.zk_jaas}", + ] + command = f"{harness.charm.workload.paths.binaries_path}/bin/kafka-server-start.sh {harness.charm.workload.paths.server_properties}" + expected_plan = { + "services": { + CONTAINER: { + "override": "replace", + "summary": "kafka", + "command": command, + "startup": "enabled", + "user": "kafka", + "group": "kafka", + "environment": { + "KAFKA_OPTS": " ".join(extra_opts), + "JAVA_HOME": "/usr/lib/jvm/java-17-openjdk-amd64", + "LOG_DIR": harness.charm.workload.paths.logs_path, + }, + } + }, + } + assert expected_plan == found_plan -def test_update_status_blocks_if_broker_not_active(harness, zk_data, passwords_data): +def test_start_does_not_start_if_not_ready(harness: Harness): + """Checks snap service does not start before ready on start hook.""" + with harness.hooks_disabled(): + harness.add_relation(PEER, CHARM_KEY) + + with ( + patch("workload.KafkaWorkload.start") as patched_start_snap_service, + patch("ops.framework.EventBase.defer") as patched_defer, + ): + harness.charm.on.start.emit() + + patched_start_snap_service.assert_not_called() + patched_defer.assert_called() + + +def test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness): + """Checks snap service does not start if mismatch Kafka+ZK TLS on start hook.""" + harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.add_relation_unit(zk_rel_id, "zookeeper/0") + + with ( + patch("managers.auth.AuthManager.add_user"), + patch("workload.KafkaWorkload.start") as patched_start_snap_service, + 
patch("core.cluster.ZooKeeper.zookeeper_connected", return_value=True), + patch("core.models.KafkaCluster.internal_user_credentials", return_value="orthanc"), + patch("core.models.KafkaCluster.tls_enabled", return_value=True), + ): + harness.charm.on.start.emit() + + patched_start_snap_service.assert_not_called() + assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status + + +def test_start_does_not_start_if_leader_has_not_set_creds(harness: Harness): + """Checks snap service does not start without inter-broker creds on start hook.""" + peer_rel_id = harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.add_relation_unit(zk_rel_id, "zookeeper/0") + harness.update_relation_data(peer_rel_id, CHARM_KEY, {"sync-password": "mellon"}) + + with ( + patch("workload.KafkaWorkload.start") as patched_start_snap_service, + patch("core.cluster.ZooKeeper.zookeeper_connected", return_value=True), + ): + harness.charm.on.start.emit() + + patched_start_snap_service.assert_not_called() + assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status + + +def test_update_status_blocks_if_broker_not_active(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -158,16 +325,17 @@ def test_update_status_blocks_if_broker_not_active(harness, zk_data, passwords_d harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=True), - patch("charm.broker_active", return_value=False) as patched_broker_active, - patch("upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.active", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("core.cluster.ZooKeeper.broker_active", return_value=False) as patched_broker_active, ): harness.charm.on.update_status.emit() assert patched_broker_active.call_count 
== 1 - assert isinstance(harness.charm.unit.status, BlockedStatus) + assert harness.charm.unit.status == Status.ZK_NOT_CONNECTED.value.status -def test_update_status_blocks_if_no_service(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="machine health checks not used on K8s") +def test_update_status_blocks_if_machine_not_configured(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -175,16 +343,36 @@ def test_update_status_blocks_if_no_service(harness, zk_data, passwords_data): harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("health.KafkaHealth.machine_configured", side_effect=snap.snap.SnapError()), + patch("health.KafkaHealth.machine_configured", side_effect=SnapError()), patch("charm.KafkaCharm.healthy", return_value=True), - patch("charm.broker_active", return_value=True), - patch("upgrade.KafkaUpgrade.idle", return_value=True), + patch("core.cluster.ZooKeeper.broker_active", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): harness.charm.on.update_status.emit() - assert isinstance(harness.charm.unit.status, BlockedStatus) + assert harness.charm.unit.status == Status.SNAP_NOT_RUNNING.value.status + +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_update_status_sets_sysconf_warning(harness: Harness, zk_data, passwords_data): + with harness.hooks_disabled(): + peer_rel_id = harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.update_relation_data(zk_rel_id, ZK, zk_data) + harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) -def test_update_status_sets_active(harness, zk_data, passwords_data): + with ( + patch("workload.KafkaWorkload.active", return_value=True), + patch("core.cluster.ZooKeeper.broker_active", return_value=True), + 
patch("health.KafkaHealth.machine_configured", return_value=False), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + ): + harness.charm.on.update_status.emit() + assert harness.charm.unit.status == Status.SYSCONF_NOT_OPTIMAL.value.status + + +def test_update_status_sets_active( + harness: Harness, zk_data, passwords_data, patched_health_machine_configured +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -192,16 +380,16 @@ def test_update_status_sets_active(harness, zk_data, passwords_data): harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=True), - patch("charm.broker_active", return_value=True), - patch("health.KafkaHealth.machine_configured", return_value=True), - patch("upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.active", return_value=True), + patch("core.cluster.ZooKeeper.broker_active", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): harness.charm.on.update_status.emit() - assert isinstance(harness.charm.unit.status, ActiveStatus) + assert harness.charm.unit.status == Status.ACTIVE.value.status -def test_storage_add_does_nothing_if_snap_not_active(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") +def test_storage_add_does_nothing_if_snap_not_active(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -211,10 +399,8 @@ def test_storage_add_does_nothing_if_snap_not_active(harness, zk_data, passwords harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=False), + patch("workload.KafkaWorkload.active", return_value=False), 
patch("charm.KafkaCharm._disable_enable_restart") as patched_restart, - patch("charm.set_snap_ownership"), - patch("charm.set_snap_mode_bits"), ): harness.add_storage(storage_name="data", count=2) harness.attach_storage(storage_id="data/1") @@ -222,7 +408,8 @@ def test_storage_add_does_nothing_if_snap_not_active(harness, zk_data, passwords assert patched_restart.call_count == 0 -def test_storage_add_defers_if_service_not_healthy(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") +def test_storage_add_defers_if_service_not_healthy(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -232,12 +419,10 @@ def test_storage_add_defers_if_service_not_healthy(harness, zk_data, passwords_d harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=True), + patch("workload.KafkaWorkload.active", return_value=True), patch("charm.KafkaCharm.healthy", return_value=False), patch("charm.KafkaCharm._disable_enable_restart") as patched_restart, patch("ops.framework.EventBase.defer") as patched_defer, - patch("charm.set_snap_ownership"), - patch("charm.set_snap_mode_bits"), ): harness.add_storage(storage_name="data", count=2) harness.attach_storage(storage_id="data/1") @@ -246,7 +431,8 @@ def test_storage_add_defers_if_service_not_healthy(harness, zk_data, passwords_d assert patched_defer.call_count == 1 -def test_storage_add_disableenables_and_starts(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") +def test_storage_add_disableenables_and_starts(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -256,18 +442,16 
@@ def test_storage_add_disableenables_and_starts(harness, zk_data, passwords_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with ( - patch("snap.KafkaSnap.active", return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock(return_value=True)), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties"), - patch("config.KafkaConfig.set_environment"), - patch("charm.safe_get_file", return_value=["gandalf=grey"]), - patch("snap.KafkaSnap.disable_enable") as patched_disable_enable, - patch("snap.KafkaSnap.start_snap_service") as patched_start, + patch("workload.KafkaWorkload.active", return_value=True), + patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("managers.config.KafkaConfigManager.set_server_properties"), + patch("managers.config.KafkaConfigManager.set_client_properties"), + patch("managers.config.KafkaConfigManager.set_environment"), + patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), + patch("workload.KafkaWorkload.disable_enable") as patched_disable_enable, + patch("workload.KafkaWorkload.start") as patched_start, patch("ops.framework.EventBase.defer") as patched_defer, - patch("charm.set_snap_ownership"), - patch("charm.set_snap_mode_bits"), ): harness.add_storage(storage_name="data", count=2) harness.attach_storage(storage_id="data/1") @@ -277,7 +461,8 @@ def test_storage_add_disableenables_and_starts(harness, zk_data, passwords_data) assert patched_defer.call_count == 0 -def test_storage_detaching_disableenables_and_starts(harness, zk_data, passwords_data): +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") +def test_storage_detaching_disableenables_and_starts(harness: Harness, zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = 
harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -289,15 +474,14 @@ def test_storage_detaching_disableenables_and_starts(harness, zk_data, passwords harness.attach_storage(storage_id="data/1") with ( - patch("snap.KafkaSnap.active", return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock(return_value=True)), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties"), - patch("config.KafkaConfig.set_environment"), - patch("charm.safe_get_file", return_value=["gandalf=grey"]), - patch("snap.KafkaSnap.disable_enable") as patched_disable_enable, - patch("snap.KafkaSnap.start_snap_service") as patched_start, + patch("workload.KafkaWorkload.active", return_value=True), + patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("managers.config.KafkaConfigManager.set_server_properties"), + patch("managers.config.KafkaConfigManager.set_client_properties"), + patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), + patch("workload.KafkaWorkload.disable_enable") as patched_disable_enable, + patch("workload.KafkaWorkload.start") as patched_start, patch("ops.framework.EventBase.defer") as patched_defer, ): harness.detach_storage(storage_id="data/1") @@ -307,64 +491,7 @@ def test_storage_detaching_disableenables_and_starts(harness, zk_data, passwords assert patched_defer.call_count == 0 -def test_install_sets_env_vars(harness): - """Checks KAFKA_OPTS and other vars are written to /etc/environment on install hook.""" - with ( - patch("snap.KafkaSnap.install"), - patch("config.KafkaConfig.set_environment") as patched_kafka_opts, - patch("charm.sysctl.Config.configure"), - ): - harness.charm.on.install.emit() - - patched_kafka_opts.assert_called_once() - - -def test_install_waits_until_zookeeper_relation(harness): - 
"""Checks unit goes to WaitingStatus without ZK relation on install hook.""" - with ( - patch("snap.KafkaSnap.install"), - patch("config.KafkaConfig.set_environment"), - patch("charm.sysctl.Config.configure"), - ): - harness.charm.on.install.emit() - assert isinstance(harness.charm.unit.status, BlockedStatus) - - -def test_install_blocks_snap_install_failure(harness): - """Checks unit goes to BlockedStatus after snap failure on install hook.""" - with ( - patch("snap.KafkaSnap.install", return_value=False), - patch("config.KafkaConfig.set_environment"), - patch("charm.sysctl.Config.configure"), - ): - harness.charm.on.install.emit() - assert isinstance(harness.charm.unit.status, BlockedStatus) - - -def test_install_configures_os(harness): - with ( - patch("snap.KafkaSnap.install"), - patch("config.KafkaConfig.set_environment"), - patch("charm.sysctl.Config.configure") as patched_os_config, - ): - harness.charm.on.install.emit() - - patched_os_config.assert_called_once_with(OS_REQUIREMENTS) - - -def test_install_sets_status_if_os_config_fails(harness): - with ( - patch("snap.KafkaSnap.install"), - patch("config.KafkaConfig.set_environment"), - patch("charm.sysctl.Config.configure") as patched_os_config, - ): - patched_os_config.side_effect = ApplyError("Error setting values") - harness.charm.on.install.emit() - - assert isinstance(harness.charm.unit.status, BlockedStatus) - - -def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness, zk_data): +def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness: Harness, zk_data): """Checks inter-broker passwords are created on zookeeper-changed hook using zk auth.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -373,17 +500,20 @@ def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness, zk_ zk_rel_id = harness.add_relation(ZK, ZK) with ( - patch("auth.KafkaAuth.add_user") as patched_add_user, - 
patch("config.KafkaConfig.set_zk_jaas_config") as patched_set_zk_jaas, - patch("config.KafkaConfig.set_server_properties") as patched_set_server_properties, + patch("workload.KafkaWorkload.active", return_value=True), + patch("managers.auth.AuthManager.add_user") as patched_add_user, + patch("managers.config.KafkaConfigManager.set_zk_jaas_config") as patched_set_zk_jaas, + patch( + "managers.config.KafkaConfigManager.set_server_properties" + ) as patched_set_server_properties, ): harness.update_relation_data(zk_rel_id, ZK, zk_data) for user in INTERNAL_USERS: - assert harness.charm.app_peer_data.get(f"{user}-password", None) + assert harness.charm.state.cluster.relation_data.get(f"{user}-password", None) - patched_set_zk_jaas.assert_called_once() - patched_set_server_properties.assert_called_once() + patched_set_zk_jaas.assert_called() + patched_set_server_properties.assert_called() # checks all users are INTERNAL only for call in patched_add_user.kwargs.get("username", []): @@ -394,7 +524,7 @@ def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness, zk_ assert True -def test_zookeeper_joined_sets_chroot(harness): +def test_zookeeper_joined_sets_chroot(harness: Harness): """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" harness.add_relation(PEER, CHARM_KEY) harness.set_leader(True) @@ -406,188 +536,98 @@ def test_zookeeper_joined_sets_chroot(harness): ) -def test_zookeeper_broken_stops_service(harness): +def test_zookeeper_broken_stops_service_and_removes_meta_properties(harness: Harness): """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) - with patch("snap.KafkaSnap.stop_snap_service") as patched_stop_snap_service: - harness.remove_relation(zk_rel_id) - - patched_stop_snap_service.assert_called_once() - assert isinstance(harness.charm.unit.status, BlockedStatus) - - -def test_start_defers_without_zookeeper(harness): - 
"""Checks event deferred and not lost without ZK relation on start hook.""" - with patch("ops.framework.EventBase.defer") as patched_defer: - harness.charm.on.start.emit() - - patched_defer.assert_called_once() - - -def test_start_sets_necessary_config(harness): - """Checks event writes all needed config to unit on start hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.set_leader(True) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - with ( - patch("config.KafkaConfig.set_zk_jaas_config") as patched_jaas, - patch("config.KafkaConfig.set_server_properties") as patched_server_properties, - patch("config.KafkaConfig.set_client_properties") as patched_client_properties, - patch("charm.KafkaCharm._update_internal_user"), - patch("snap.KafkaSnap.start_snap_service"), - patch("charm.KafkaCharm._on_update_status"), - patch("charm.KafkaCharm.ready_to_start", return_value=True), + patch("workload.KafkaWorkload.stop") as patched_stop_snap_service, + patch("workload.KafkaWorkload.exec") as patched_exec, ): - harness.update_relation_data(zk_rel_id, ZK, {"username": "glorfindel"}) - harness.charm.on.start.emit() - patched_jaas.assert_called_once() - patched_server_properties.assert_called_once() - patched_client_properties.assert_called_once() - - -def test_start_does_not_start_if_not_ready(harness): - """Checks snap service does not start before ready on start hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - - with ( - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=False), - patch("snap.KafkaSnap.start_snap_service") as patched_start_snap_service, - patch("ops.framework.EventBase.defer") as patched_defer, - patch("config.KafkaConfig.zookeeper_connected", return_value=True), - patch("config.KafkaConfig.internal_user_credentials", return_value="orthanc"), - ): - 
harness.charm.on.start.emit() - - patched_start_snap_service.assert_not_called() - patched_defer.assert_called() - - -def test_start_does_not_start_if_not_same_tls_as_zk(harness): - """Checks snap service does not start if mismatch Kafka+ZK TLS on start hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - - with ( - patch("auth.KafkaAuth.add_user"), - patch("config.KafkaConfig.set_zk_jaas_config"), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties"), - patch("snap.KafkaSnap.start_snap_service") as patched_start_snap_service, - patch("config.KafkaConfig.zookeeper_connected", return_value=True), - patch("config.KafkaConfig.internal_user_credentials", return_value="orthanc"), - patch("tls.KafkaTLS.enabled", return_value=True), - ): - harness.charm.on.start.emit() + harness.remove_relation(zk_rel_id) - patched_start_snap_service.assert_not_called() + patched_stop_snap_service.assert_called_once() + assert re.match(r"rm .*/meta.properties", patched_exec.call_args_list[0].args[0]) assert isinstance(harness.charm.unit.status, BlockedStatus) -def test_start_does_not_start_if_leader_has_not_set_creds(harness): - """Checks snap service does not start without inter-broker creds on start hook.""" - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - harness.update_relation_data(peer_rel_id, CHARM_KEY, {"sync-password": "mellon"}) - - with ( - patch("config.KafkaConfig.set_zk_jaas_config"), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties"), - patch("snap.KafkaSnap.start_snap_service") as patched_start_snap_service, - patch("config.KafkaConfig.zookeeper_connected", return_value=True), - ): - harness.charm.on.start.emit() - - patched_start_snap_service.assert_not_called() - assert 
isinstance(harness.charm.unit.status, WaitingStatus) - - -def test_start_blocks_if_service_failed_silently(harness): - """Checks unit is not ActiveStatus if snap service start failed silently on start hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - harness.set_leader(True) +def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness): + """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" + with harness.hooks_disabled(): + harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.set_leader(True) with ( - patch("auth.KafkaAuth.add_user"), - patch("config.KafkaConfig.set_zk_jaas_config"), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties"), - patch("snap.KafkaSnap.start_snap_service") as patched_start_snap_service, - patch("charm.broker_active", return_value=False) as patched_broker_active, - patch("config.KafkaConfig.internal_user_credentials", return_value="orthanc"), - patch("config.KafkaConfig.zookeeper_connected", return_value=True), + patch("workload.KafkaWorkload.stop"), + patch("workload.KafkaWorkload.exec"), + patch("core.models.StateBase.update") as patched_update, + patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={"saruman": "orthanc"}, + ), ): - patched_broker_active.retry.wait = wait_none - harness.charm.on.start.emit() + harness.remove_relation(zk_rel_id) - patched_start_snap_service.assert_called_once() - assert isinstance(harness.charm.unit.status, BlockedStatus) + patched_update.assert_called_once_with({"saruman-password": ""}) -def test_config_changed_updates_server_properties(harness): +def test_config_changed_updates_server_properties(harness: Harness, zk_data): """Checks that new charm/unit config writes server config to unit on config changed hook.""" - peer_rel_id = 
harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") + with harness.hooks_disabled(): + peer_rel_id = harness.add_relation(PEER, CHARM_KEY) + zk_rel_id = harness.add_relation(ZK, ZK) + harness.add_relation_unit(zk_rel_id, f"{ZK}/0") + harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") + harness.update_relation_data(zk_rel_id, ZK, zk_data) with ( patch( - "config.KafkaConfig.server_properties", + "managers.config.KafkaConfigManager.server_properties", new_callable=PropertyMock, return_value=["gandalf=white"], ), - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("charm.safe_get_file", return_value=["gandalf=grey"]), - patch("config.KafkaConfig.set_server_properties") as set_server_properties, - patch("config.KafkaConfig.set_client_properties"), - patch("config.KafkaConfig.set_environment"), + patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), + patch("managers.config.KafkaConfigManager.set_server_properties") as set_server_properties, + patch("managers.config.KafkaConfigManager.set_client_properties"), ): harness.charm.on.config_changed.emit() set_server_properties.assert_called_once() -def test_config_changed_updates_client_properties(harness): +def test_config_changed_updates_client_properties(harness: Harness): """Checks that new charm/unit config writes client config to unit on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") with ( patch( - "config.KafkaConfig.client_properties", + "managers.config.KafkaConfigManager.client_properties", new_callable=PropertyMock, return_value=["gandalf=white"], ), patch( - 
"config.KafkaConfig.server_properties", + "managers.config.KafkaConfigManager.server_properties", new_callable=PropertyMock, return_value=["sauron=bad"], ), - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("charm.safe_get_file", return_value=["gandalf=grey"]), - patch("config.KafkaConfig.set_server_properties"), - patch("config.KafkaConfig.set_client_properties") as set_client_properties, - patch("config.KafkaConfig.set_environment"), + patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), + patch("managers.config.KafkaConfigManager.set_server_properties"), + patch("managers.config.KafkaConfigManager.set_client_properties") as set_client_properties, ): harness.charm.on.config_changed.emit() set_client_properties.assert_called_once() -def test_config_changed_updates_client_data(harness): +def test_config_changed_updates_client_data(harness: Harness): """Checks that provided relation data updates on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -595,16 +635,20 @@ def test_config_changed_updates_client_data(harness): with ( patch( - "config.KafkaConfig.server_properties", + "managers.config.KafkaConfigManager.server_properties", new_callable=PropertyMock, return_value=["gandalf=white"], ), - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("charm.safe_get_file", return_value=["gandalf=white"]), - patch("provider.KafkaProvider.update_connection_info") as 
patched_update_connection_info, - patch("config.KafkaConfig.set_client_properties") as patched_set_client_properties, + patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), + patch("managers.config.KafkaConfigManager.set_zk_jaas_config"), + patch( + "events.provider.KafkaProvider.update_connection_info" + ) as patched_update_connection_info, + patch( + "managers.config.KafkaConfigManager.set_client_properties" + ) as patched_set_client_properties, ): harness.set_leader(True) harness.charm.on.config_changed.emit() @@ -613,7 +657,7 @@ def test_config_changed_updates_client_data(harness): patched_update_connection_info.assert_called_once() -def test_config_changed_restarts(harness): +def test_config_changed_restarts(harness: Harness): """Checks units rolling-restat on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -623,27 +667,33 @@ def test_config_changed_restarts(harness): with ( patch( - "config.KafkaConfig.server_properties", + "managers.config.KafkaConfigManager.server_properties", new_callable=PropertyMock, return_value=["gandalf=grey"], ), - patch("config.KafkaConfig.set_environment"), - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch("charm.safe_get_file", return_value=["gandalf=white"]), - patch("upgrade.KafkaUpgrade.idle", return_value=True), - patch("config.safe_write_to_file", return_value=None), - patch("snap.KafkaSnap.restart_snap_service") as patched_restart_snap_service, - patch("charm.broker_active", return_value=True), - patch("config.KafkaConfig.zookeeper_connected", return_value=True), - patch("auth.KafkaAuth.add_user"), - patch("config.KafkaConfig.set_zk_jaas_config"), - 
patch("config.KafkaConfig.set_server_properties"), + patch("charm.KafkaCharm.healthy", return_value=True), + patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), + patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch("workload.KafkaWorkload.restart") as patched_restart_snap_service, + patch("core.cluster.ZooKeeper.broker_active", return_value=True), + patch("core.cluster.ZooKeeper.zookeeper_connected", return_value=True), + patch("managers.auth.AuthManager.add_user"), + patch("managers.config.KafkaConfigManager.set_zk_jaas_config"), + patch("managers.config.KafkaConfigManager.set_server_properties"), ): harness.update_relation_data(zk_rel_id, ZK, {"username": "glorfindel"}) - patched_restart_snap_service.reset_mock() harness.charm.on.config_changed.emit() - patched_restart_snap_service.assert_called_once() + + +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_on_remove_sysctl_is_deleted(harness: Harness): + peer_rel_id = harness.add_relation(PEER, CHARM_KEY) + harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") + + with patch("charm.sysctl.Config.remove") as patched_sysctl_remove: + harness.charm.on.remove.emit() + + patched_sysctl_remove.assert_called_once() diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index d0bbf422..35701794 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
from pathlib import Path @@ -13,6 +13,7 @@ from literals import ( ADMIN_USER, CHARM_KEY, + CONTAINER, DEPENDENCIES, INTER_BROKER_USER, INTERNAL_USERS, @@ -20,8 +21,10 @@ JVM_MEM_MAX_GB, JVM_MEM_MIN_GB, PEER, + SUBSTRATE, ZK, ) +from managers.config import KafkaConfigManager CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) @@ -30,7 +33,11 @@ @pytest.fixture def harness(): - harness = Harness(KafkaCharm, meta=METADATA) + harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) + + if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -42,34 +49,34 @@ def harness(): return harness -def test_all_storages_in_log_dirs(harness): +def test_all_storages_in_log_dirs(harness: Harness): """Checks that the log.dirs property updates with all available storages.""" storage_metadata = harness.charm.meta.storages["data"] - min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 0 + min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1 with harness.hooks_disabled(): harness.add_storage(storage_name="data", count=min_storages, attach=True) - assert len(harness.charm.kafka_config.log_dirs.split(",")) == len( + assert len(harness.charm.state.log_dirs.split(",")) == len( harness.charm.model.storages["data"] ) -def test_internal_credentials_only_return_when_all_present(harness): +def test_internal_credentials_only_return_when_all_present(harness: Harness): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.update_relation_data( peer_rel_id, CHARM_KEY, {f"{INTERNAL_USERS[0]}-password": "mellon"} ) - assert not harness.charm.kafka_config.internal_user_credentials + assert not harness.charm.state.cluster.internal_user_credentials for user in INTERNAL_USERS: harness.update_relation_data(peer_rel_id, CHARM_KEY, 
{f"{user}-password": "mellon"}) - assert harness.charm.kafka_config.internal_user_credentials - assert len(harness.charm.kafka_config.internal_user_credentials) == len(INTERNAL_USERS) + assert harness.charm.state.cluster.internal_user_credentials + assert len(harness.charm.state.cluster.internal_user_credentials) == len(INTERNAL_USERS) -def test_log_dirs_in_server_properties(harness): +def test_log_dirs_in_server_properties(harness: Harness): """Checks that log.dirs are added to server_properties.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -85,25 +92,27 @@ def test_log_dirs_in_server_properties(harness): }, ) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) found_log_dirs = False with ( patch( - "config.KafkaConfig.internal_user_credentials", + "core.models.KafkaCluster.internal_user_credentials", new_callable=PropertyMock, return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ) ): - for prop in harness.charm.kafka_config.server_properties: + for prop in harness.charm.config_manager.server_properties: if "log.dirs" in prop: found_log_dirs = True assert found_log_dirs -def test_listeners_in_server_properties(harness): +def test_listeners_in_server_properties(harness: Harness): """Checks that listeners are split into INTERNAL and EXTERNAL.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -119,26 +128,26 @@ def test_listeners_in_server_properties(harness): }, ) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.update_relation_data(peer_relation_id, "kafka/0", 
{"private-address": "treebeard"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) expected_listeners = "listeners=INTERNAL_SASL_PLAINTEXT://:19092" - expected_advertised_listeners = ( - "advertised.listeners=INTERNAL_SASL_PLAINTEXT://treebeard:19092" - ) + expected_advertised_listeners = f"advertised.listeners=INTERNAL_SASL_PLAINTEXT://{'treebeard' if SUBSTRATE == 'vm' else 'kafka-k8s-0.kafka-k8s-endpoints'}:19092" with ( patch( - "config.KafkaConfig.internal_user_credentials", + "core.models.KafkaCluster.internal_user_credentials", new_callable=PropertyMock, return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ) ): - assert expected_listeners in harness.charm.kafka_config.server_properties - assert expected_advertised_listeners in harness.charm.kafka_config.server_properties + assert expected_listeners in harness.charm.config_manager.server_properties + assert expected_advertised_listeners in harness.charm.config_manager.server_properties -def test_ssl_listeners_in_server_properties(harness): +def test_ssl_listeners_in_server_properties(harness: Harness): """Checks that listeners are added after TLS relation are created.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) # Simulate data-integrator relation @@ -162,31 +171,35 @@ def test_ssl_listeners_in_server_properties(harness): }, ) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") harness.update_relation_data( peer_relation_id, - "kafka/0", + f"{CHARM_KEY}/0", {"private-address": "treebeard", "certificate": "keepitsecret"}, ) - harness.update_relation_data(peer_relation_id, "kafka", {"tls": "enabled", "mtls": "enabled"}) + harness.update_relation_data( + peer_relation_id, CHARM_KEY, {"tls": "enabled", "mtls": "enabled"} + ) + + host = 
"treebeard" if SUBSTRATE == "vm" else "kafka-k8s-0.kafka-k8s-endpoints" expected_listeners = ( "listeners=INTERNAL_SASL_SSL://:19093,CLIENT_SASL_SSL://:9093,CLIENT_SSL://:9094" ) - expected_advertised_listeners = "advertised.listeners=INTERNAL_SASL_SSL://treebeard:19093,CLIENT_SASL_SSL://treebeard:9093,CLIENT_SSL://treebeard:9094" + expected_advertised_listeners = f"advertised.listeners=INTERNAL_SASL_SSL://{host}:19093,CLIENT_SASL_SSL://{host}:9093,CLIENT_SSL://{host}:9094" with ( patch( - "config.KafkaConfig.internal_user_credentials", + "core.models.KafkaCluster.internal_user_credentials", new_callable=PropertyMock, return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ) ): - assert expected_listeners in harness.charm.kafka_config.server_properties - assert expected_advertised_listeners in harness.charm.kafka_config.server_properties + assert expected_listeners in harness.charm.config_manager.server_properties + assert expected_advertised_listeners in harness.charm.config_manager.server_properties -def test_zookeeper_config_succeeds_fails_config(harness): +def test_zookeeper_config_succeeds_fails_config(harness: Harness): """Checks that no ZK config is returned if missing field.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -200,11 +213,10 @@ def test_zookeeper_config_succeeds_fails_config(harness): "tls": "disabled", }, ) - assert harness.charm.kafka_config.zookeeper_config == {} - assert not harness.charm.kafka_config.zookeeper_connected + assert not harness.charm.state.zookeeper.zookeeper_connected -def test_zookeeper_config_succeeds_valid_config(harness): +def test_zookeeper_config_succeeds_valid_config(harness: Harness): """Checks that ZK config is returned if all fields.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -219,50 +231,52 @@ def test_zookeeper_config_succeeds_valid_config(harness): "tls": "disabled", }, ) - assert "connect" in 
harness.charm.kafka_config.zookeeper_config - assert ( - harness.charm.kafka_config.zookeeper_config["connect"] == "1.1.1.1:2181,2.2.2.2:2181/kafka" - ) - assert harness.charm.kafka_config.zookeeper_connected + assert harness.charm.state.zookeeper.connect == "1.1.1.1:2181,2.2.2.2:2181/kafka" + assert harness.charm.state.zookeeper.zookeeper_connected -def test_kafka_opts(harness): +def test_kafka_opts(harness: Harness): """Checks necessary args for KAFKA_OPTS.""" - args = harness.charm.kafka_config.kafka_opts + args = harness.charm.config_manager.kafka_opts assert "-Djava.security.auth.login.config" in args assert "KAFKA_OPTS" in args @pytest.mark.parametrize( "profile,expected", - [("testing", JVM_MEM_MIN_GB), ("production", JVM_MEM_MAX_GB)], + [("production", JVM_MEM_MAX_GB), ("testing", JVM_MEM_MIN_GB)], ) -def test_heap_opts(harness, profile, expected): +def test_heap_opts(harness: Harness, profile, expected): """Checks necessary args for KAFKA_HEAP_OPTS.""" - harness._update_config({"profile": profile}) - args = harness.charm.kafka_config.heap_opts + # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that + # self.config is not passed again to KafkaConfigManager + harness.update_config({"profile": profile}) + conf_manager = KafkaConfigManager( + harness.charm.state, harness.charm.workload, harness.charm.config, "1" + ) + args = conf_manager.heap_opts + assert f"Xms{expected}G" in args assert f"Xmx{expected}G" in args assert "KAFKA_HEAP_OPTS" in args -def test_jmx_opts(harness): +def test_jmx_opts(harness: Harness): """Checks necessary args for KAFKA_JMX_OPTS.""" - args = harness.charm.kafka_config.jmx_opts + args = harness.charm.config_manager.jmx_opts assert "-javaagent:" in args assert args.split(":")[1].split("=")[-1] == str(JMX_EXPORTER_PORT) assert "KAFKA_JMX_OPTS" in args -def test_set_environment(harness): +def test_set_environment(harness: Harness): """Checks all necessary env-vars are written to /etc/environment.""" with 
( - patch("config.safe_write_to_file") as patched_write, + patch("workload.KafkaWorkload.write") as patched_write, patch("builtins.open", mock_open()), patch("shutil.chown"), - patch("utils.set_snap_ownership"), ): - harness.charm.kafka_config.set_environment() + harness.charm.config_manager.set_environment() for call in patched_write.call_args_list: assert "KAFKA_OPTS" in call.kwargs.get("content", "") @@ -273,41 +287,45 @@ def test_set_environment(harness): assert "/etc/environment" == call.kwargs.get("path", "") -def test_bootstrap_server(harness): +def test_bootstrap_server(harness: Harness): """Checks the bootstrap-server property setting.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) - harness.update_relation_data(peer_relation_id, "kafka/1", {"private-address": "shelob"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) + harness.update_relation_data(peer_relation_id, f"{CHARM_KEY}/1", {"private-address": "shelob"}) - assert len(harness.charm.kafka_config.bootstrap_server) == 2 - for server in harness.charm.kafka_config.bootstrap_server: + assert len(harness.charm.state.bootstrap_server) == 2 + for server in harness.charm.state.bootstrap_server: assert "9092" in server -def test_default_replication_properties_less_than_three(harness): +def test_default_replication_properties_less_than_three(harness: Harness): """Checks replication property defaults updates with units < 3.""" - assert "num.partitions=1" in harness.charm.kafka_config.default_replication_properties + assert "num.partitions=1" in harness.charm.config_manager.default_replication_properties assert ( - "default.replication.factor=1" in harness.charm.kafka_config.default_replication_properties + 
"default.replication.factor=1" + in harness.charm.config_manager.default_replication_properties ) - assert "min.insync.replicas=1" in harness.charm.kafka_config.default_replication_properties + assert "min.insync.replicas=1" in harness.charm.config_manager.default_replication_properties -def test_default_replication_properties_more_than_three(harness): +def test_default_replication_properties_more_than_three(harness: Harness): """Checks replication property defaults updates with units > 3.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.add_relation_unit(peer_relation_id, "kafka/2") - harness.add_relation_unit(peer_relation_id, "kafka/3") - harness.add_relation_unit(peer_relation_id, "kafka/4") - harness.add_relation_unit(peer_relation_id, "kafka/5") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/2") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/3") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/4") + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/5") - assert "num.partitions=3" in harness.charm.kafka_config.default_replication_properties + assert "num.partitions=3" in harness.charm.config_manager.default_replication_properties assert ( - "default.replication.factor=3" in harness.charm.kafka_config.default_replication_properties + "default.replication.factor=3" + in harness.charm.config_manager.default_replication_properties ) - assert "min.insync.replicas=2" in harness.charm.kafka_config.default_replication_properties + assert "min.insync.replicas=2" in harness.charm.config_manager.default_replication_properties def test_ssl_principal_mapping_rules(harness: Harness): @@ -329,19 +347,25 @@ def test_ssl_principal_mapping_rules(harness: Harness): with ( patch( - "config.KafkaConfig.internal_user_credentials", + "core.models.KafkaCluster.internal_user_credentials", 
new_callable=PropertyMock, return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ) ): - harness.update_config({"ssl_principal_mapping_rules": "RULE:^(erebor)$/$1,DEFAULT"}) + # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that + # self.config is not passed again to KafkaConfigManager + harness._update_config({"ssl_principal_mapping_rules": "RULE:^(erebor)$/$1,DEFAULT"}) + conf_manager = KafkaConfigManager( + harness.charm.state, harness.charm.workload, harness.charm.config, "1" + ) + assert ( "ssl.principal.mapping.rules=RULE:^(erebor)$/$1,DEFAULT" - in harness.charm.kafka_config.server_properties + in conf_manager.server_properties ) -def test_auth_properties(harness): +def test_auth_properties(harness: Harness): """Checks necessary auth properties are present.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) @@ -361,10 +385,10 @@ def test_auth_properties(harness): }, ) - assert "broker.id=0" in harness.charm.kafka_config.auth_properties + assert "broker.id=0" in harness.charm.config_manager.auth_properties assert ( - f"zookeeper.connect={harness.charm.kafka_config.zookeeper_config['connect']}" - in harness.charm.kafka_config.auth_properties + f"zookeeper.connect={harness.charm.state.zookeeper.connect}" + in harness.charm.config_manager.auth_properties ) @@ -387,15 +411,15 @@ def test_rack_properties(harness: Harness): with ( patch( - "config.KafkaConfig.rack_properties", + "managers.config.KafkaConfigManager.rack_properties", new_callable=PropertyMock, return_value=["broker.rack=gondor-west"], ) ): - assert "broker.rack=gondor-west" in harness.charm.kafka_config.server_properties + assert "broker.rack=gondor-west" in harness.charm.config_manager.server_properties -def test_inter_broker_protocol_version(harness): +def test_inter_broker_protocol_version(harness: Harness): """Checks that rack properties are added to server properties.""" 
harness.add_relation(PEER, CHARM_KEY) zk_relation_id = harness.add_relation(ZK, CHARM_KEY) @@ -413,10 +437,10 @@ def test_inter_broker_protocol_version(harness): ) assert len(DEPENDENCIES["kafka_service"]["version"].split(".")) == 3 - assert "inter.broker.protocol.version=3.6" in harness.charm.kafka_config.server_properties + assert "inter.broker.protocol.version=3.6" in harness.charm.config_manager.server_properties -def test_super_users(harness): +def test_super_users(harness: Harness): """Checks super-users property is updated for new admin clients.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) app_relation_id = harness.add_relation("kafka-client", "app") @@ -426,20 +450,20 @@ def test_super_users(harness): appii_relation_id, "appii", {"extra-user-roles": "admin,consumer"} ) - assert len(harness.charm.kafka_config.super_users.split(";")) == len(INTERNAL_USERS) + assert len(harness.charm.state.super_users.split(";")) == len(INTERNAL_USERS) harness.update_relation_data( peer_relation_id, harness.charm.app.name, {f"relation-{app_relation_id}": "mellon"} ) - assert len(harness.charm.kafka_config.super_users.split(";")) == (len(INTERNAL_USERS) + 1) + assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 1) harness.update_relation_data( peer_relation_id, harness.charm.app.name, {f"relation-{appii_relation_id}": "mellon"} ) - assert len(harness.charm.kafka_config.super_users.split(";")) == (len(INTERNAL_USERS) + 2) + assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 2) harness.update_relation_data(appii_relation_id, "appii", {"extra-user-roles": "consumer"}) - assert len(harness.charm.kafka_config.super_users.split(";")) == (len(INTERNAL_USERS) + 1) + assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 1) diff --git a/tests/unit/test_health.py b/tests/unit/test_health.py index 3fda9b9c..14b5b0b3 100644 --- a/tests/unit/test_health.py +++ b/tests/unit/test_health.py @@ 
-3,9 +3,8 @@ # See LICENSE file for licensing details. import logging -import unittest.mock from pathlib import Path -from unittest.mock import patch +from unittest.mock import mock_open, patch import pytest import yaml @@ -23,7 +22,7 @@ @pytest.fixture def harness(): - harness = Harness(KafkaCharm, meta=METADATA) + harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -44,7 +43,7 @@ def test_service_pid(harness): with ( patch( "builtins.open", - new_callable=unittest.mock.mock_open, + new_callable=mock_open, read_data="0::/system.slice/snap.charmed-kafka.daemon.service", ), patch("subprocess.check_output", return_value="1314231"), @@ -70,7 +69,7 @@ def test_check_vm_swappiness(harness): def test_check_total_memory_testing_profile(harness, total_mem_kb, profile, limit): harness._update_config({"profile": profile}) - with patch("health.safe_get_file", return_value=[f"MemTotal: {total_mem_kb} kB"]): + with patch("workload.KafkaWorkload.read", return_value=[f"MemTotal: {total_mem_kb} kB"]): if total_mem_kb / 1000000 <= limit: assert not harness.charm.health._check_total_memory() else: @@ -80,12 +79,12 @@ def test_check_total_memory_testing_profile(harness, total_mem_kb, profile, limi def test_get_partitions_size(harness): example_log_dirs = 'Querying brokers for log directories information\nReceived log directory information from brokers 0\n{"version":1,"brokers":[{"broker":0,"logDirs":[{"logDir":"/var/snap/charmed-kafka/common/var/lib/kafka/data/0","error":null,"partitions":[{"partition":"NEW-TOPIC-2-4","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-3","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-2","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-1","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-0","size":393,"offsetLag":0,"isFuture":false}]}]}]}\n' - with 
patch("snap.KafkaSnap.run_bin_command", return_value=example_log_dirs): + with patch("workload.KafkaWorkload.run_bin_command", return_value=example_log_dirs): assert harness.charm.health._get_partitions_size() == (5, 393) def test_check_file_descriptors_no_listeners(harness): - with patch("snap.KafkaSnap.run_bin_command") as patched_run_bin: + with patch("workload.KafkaWorkload.run_bin_command") as patched_run_bin: assert harness.charm.health._check_file_descriptors() assert patched_run_bin.call_count == 0 diff --git a/tests/unit/test_provider.py b/tests/unit/test_provider.py index 42e674da..53b5ec30 100644 --- a/tests/unit/test_provider.py +++ b/tests/unit/test_provider.py @@ -11,7 +11,7 @@ from ops.testing import Harness from charm import KafkaCharm -from literals import CHARM_KEY, PEER, REL_NAME +from literals import CHARM_KEY, CONTAINER, PEER, REL_NAME, SUBSTRATE logger = logging.getLogger(__name__) @@ -22,7 +22,11 @@ @pytest.fixture def harness(): - harness = Harness(KafkaCharm, meta=METADATA) + harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) + + if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -35,14 +39,14 @@ def harness(): return harness -def test_client_relation_created_defers_if_not_ready(harness): +def test_client_relation_created_defers_if_not_ready(harness: Harness): """Checks event is deferred if not ready on clientrelationcreated hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) with ( - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=False), - patch("auth.KafkaAuth.add_user") as patched_add_user, + patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=False), + patch("managers.auth.AuthManager.add_user") as patched_add_user, patch("ops.framework.EventBase.defer") as patched_defer, ): harness.set_leader(True) @@ -58,22 +62,19 @@ def 
test_client_relation_created_defers_if_not_ready(harness): patched_defer.assert_called() -def test_client_relation_created_adds_user(harness): +def test_client_relation_created_adds_user(harness: Harness): """Checks if new users are added on clientrelationcreated hook.""" - harness.add_relation(PEER, CHARM_KEY) + with harness.hooks_disabled(): + harness.add_relation(PEER, CHARM_KEY) + harness.set_leader(True) + client_rel_id = harness.add_relation(REL_NAME, "app") + with ( - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("auth.KafkaAuth.add_user") as patched_add_user, - patch("snap.KafkaSnap.run_bin_command"), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch( - "config.KafkaConfig.zookeeper_config", - new_callable=PropertyMock, - return_value={"connect": "yes"}, - ), + patch("managers.auth.AuthManager.add_user") as patched_add_user, + patch("workload.KafkaWorkload.run_bin_command"), + patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), ): - harness.set_leader(True) - client_rel_id = harness.add_relation(REL_NAME, "app") harness.update_relation_data( client_rel_id, "app", @@ -81,25 +82,21 @@ def test_client_relation_created_adds_user(harness): ) patched_add_user.assert_called_once() + assert harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") - assert harness.charm.peer_relation.data[harness.charm.app].get(f"relation-{client_rel_id}") - -def test_client_relation_broken_removes_user(harness): +def test_client_relation_broken_removes_user(harness: Harness): """Checks if users are removed on clientrelationbroken hook.""" - harness.add_relation(PEER, CHARM_KEY) + with harness.hooks_disabled(): + harness.add_relation(PEER, CHARM_KEY) + with ( - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("auth.KafkaAuth.add_user"), - patch("auth.KafkaAuth.delete_user") as 
patched_delete_user, - patch("auth.KafkaAuth.remove_all_user_acls") as patched_remove_acls, - patch("snap.KafkaSnap.run_bin_command"), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch( - "config.KafkaConfig.zookeeper_config", - new_callable=PropertyMock, - return_value={"connect": "yes"}, - ), + patch("managers.auth.AuthManager.add_user"), + patch("managers.auth.AuthManager.delete_user") as patched_delete_user, + patch("managers.auth.AuthManager.remove_all_user_acls") as patched_remove_acls, + patch("workload.KafkaWorkload.run_bin_command"), + patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), ): harness.set_leader(True) client_rel_id = harness.add_relation(REL_NAME, "app") @@ -110,31 +107,26 @@ def test_client_relation_broken_removes_user(harness): ) # validating username got added - assert harness.charm.peer_relation.data[harness.charm.app].get(f"relation-{client_rel_id}") + assert harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") harness.remove_relation(client_rel_id) # validating username got removed - assert not harness.charm.peer_relation.data[harness.charm.app].get( - f"relation-{client_rel_id}" - ) + assert not harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") patched_remove_acls.assert_called_once() patched_delete_user.assert_called_once() -def test_client_relation_joined_sets_necessary_relation_data(harness): +def test_client_relation_joined_sets_necessary_relation_data(harness: Harness): """Checks if all needed provider relation data is set on clientrelationjoined hook.""" - harness.add_relation(PEER, CHARM_KEY) + with harness.hooks_disabled(): + harness.add_relation(PEER, CHARM_KEY) + with ( - patch("charm.KafkaCharm.ready_to_start", new_callable=PropertyMock, return_value=True), - patch("auth.KafkaAuth.add_user"), - patch("snap.KafkaSnap.run_bin_command"), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, 
return_value=True), - patch( - "config.KafkaConfig.zookeeper_config", - new_callable=PropertyMock, - return_value={"connect": "yes"}, - ), + patch("managers.auth.AuthManager.add_user"), + patch("workload.KafkaWorkload.run_bin_command"), + patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), ): harness.set_leader(True) client_rel_id = harness.add_relation(REL_NAME, "app") diff --git a/tests/unit/test_structured_config.py b/tests/unit/test_structured_config.py index 371a9207..799674b9 100644 --- a/tests/unit/test_structured_config.py +++ b/tests/unit/test_structured_config.py @@ -10,7 +10,7 @@ from ops.testing import Harness from charm import KafkaCharm -from literals import CHARM_KEY +from literals import CHARM_KEY, CONTAINER, SUBSTRATE CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) @@ -22,6 +22,10 @@ @pytest.fixture def harness(): harness = Harness(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS) + + if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness.begin() return harness diff --git a/tests/unit/test_tls.py b/tests/unit/test_tls.py index ba0e3f99..35049c9c 100644 --- a/tests/unit/test_tls.py +++ b/tests/unit/test_tls.py @@ -4,6 +4,7 @@ import socket from pathlib import Path +from unittest.mock import patch import pytest import yaml @@ -11,7 +12,7 @@ from ops.testing import Harness from charm import KafkaCharm -from literals import CHARM_KEY, PEER, ZK +from literals import CHARM_KEY, CONTAINER, PEER, SUBSTRATE, ZK CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) @@ -20,7 +21,11 @@ @pytest.fixture def harness(): - harness = Harness(KafkaCharm, meta=METADATA) + harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) + + if SUBSTRATE == "k8s": + 
harness.set_can_connect(CONTAINER, True) + harness.add_relation("restart", CHARM_KEY) harness._update_config( { @@ -59,8 +64,10 @@ def harness(): def test_blocked_if_trusted_certificate_added_before_tls_relation(harness: Harness): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) harness.set_leader(True) harness.add_relation("trusted-certificate", "tls-one") @@ -71,14 +78,16 @@ def test_blocked_if_trusted_certificate_added_before_tls_relation(harness: Harne def test_mtls_flag_added(harness: Harness): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/1") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) - harness.update_relation_data(peer_relation_id, "kafka", {"tls": "enabled"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) + harness.update_relation_data(peer_relation_id, CHARM_KEY, {"tls": "enabled"}) harness.set_leader(True) harness.add_relation("trusted-certificate", "tls-one") - peer_relation_data = harness.get_relation_data(peer_relation_id, "kafka") + peer_relation_data = harness.get_relation_data(peer_relation_id, CHARM_KEY) assert peer_relation_data.get("mtls", "disabled") == "enabled" assert isinstance(harness.charm.app.status, ActiveStatus) @@ -86,8 +95,10 @@ def test_mtls_flag_added(harness: Harness): def test_extra_sans_config(harness: Harness): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - 
harness.add_relation_unit(peer_relation_id, "kafka/0") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) harness.update_config({"certificate_extra_sans": ""}) assert harness.charm.tls._extra_sans == [] @@ -102,12 +113,24 @@ def test_extra_sans_config(harness: Harness): def test_sans(harness: Harness): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, "kafka/0") - harness.update_relation_data(peer_relation_id, "kafka/0", {"private-address": "treebeard"}) + harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") + harness.update_relation_data( + peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ) harness.update_config({"certificate_extra_sans": "worker{unit}.com"}) sock_dns = socket.getfqdn() - assert harness.charm.tls._sans == { - "sans_ip": ["treebeard"], - "sans_dns": ["kafka/0", sock_dns, "worker0.com"], - } + if SUBSTRATE == "vm": + assert harness.charm.tls._sans == { + "sans_ip": ["treebeard"], + "sans_dns": [f"{CHARM_KEY}/0", sock_dns, "worker0.com"], + } + elif SUBSTRATE == "k8s": + # NOTE previous k8s sans_ip like kafka-k8s-0.kafka-k8s-endpoints or binding pod address + with patch("ops.model.Model.get_binding"): + assert harness.charm.tls._sans["sans_dns"] == [ + "kafka-k8s-0", + "kafka-k8s-0.kafka-k8s-endpoints", + sock_dns, + "worker0.com", + ] diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index de97dda8..e979c89e 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -13,9 +13,8 @@ from ops.testing import Harness from charm import KafkaCharm +from events.upgrade import KafkaDependencyModel from literals import CHARM_KEY, DEPENDENCIES, PEER, ZK -from snap import KafkaSnap -from upgrade import 
KafkaDependencyModel, KafkaUpgrade logger = logging.getLogger(__name__) @@ -49,25 +48,29 @@ def harness(zk_data): return harness -def test_pre_upgrade_check_raises_not_stable(harness): +def test_pre_upgrade_check_raises_not_stable(harness: Harness): with pytest.raises(ClusterNotReadyError): harness.charm.upgrade.pre_upgrade_check() -def test_pre_upgrade_check_succeeds(harness): +def test_pre_upgrade_check_succeeds(harness: Harness): with patch("charm.KafkaCharm.healthy", return_value=True): harness.charm.upgrade.pre_upgrade_check() -def test_build_upgrade_stack(harness): +def test_build_upgrade_stack(harness: Harness): with harness.hooks_disabled(): - harness.add_relation_unit(harness.charm.peer_relation.id, f"{CHARM_KEY}/1") + harness.add_relation_unit(harness.charm.state.peer_relation.id, f"{CHARM_KEY}/1") harness.update_relation_data( - harness.charm.peer_relation.id, f"{CHARM_KEY}/1", {"private-address": "111.111.111"} + harness.charm.state.peer_relation.id, + f"{CHARM_KEY}/1", + {"private-address": "111.111.111"}, ) - harness.add_relation_unit(harness.charm.peer_relation.id, f"{CHARM_KEY}/2") + harness.add_relation_unit(harness.charm.state.peer_relation.id, f"{CHARM_KEY}/2") harness.update_relation_data( - harness.charm.peer_relation.id, f"{CHARM_KEY}/2", {"private-address": "222.222.222"} + harness.charm.state.peer_relation.id, + f"{CHARM_KEY}/2", + {"private-address": "222.222.222"}, ) stack = harness.charm.upgrade.build_upgrade_stack() @@ -83,12 +86,20 @@ def test_kafka_dependency_model(): assert DependencyModel(**value) -def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails(harness): +def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails(harness: Harness): with ( patch.object(KazooClient, "start"), - patch("utils.ZooKeeperManager.get_leader", return_value="000.000.000"), + patch( + "core.models.ZooKeeperManager.get_leader", + new_callable=PropertyMock, + return_value="000.000.000", + ), # NOTE: Dependency requires >3 - 
patch("utils.ZooKeeperManager.get_version", return_value="1.2.3"), + patch( + "core.models.ZooKeeper.zookeeper_version", + new_callable=PropertyMock, + return_value="1.2.3", + ), ): mock_event = MagicMock() harness.charm.upgrade._on_upgrade_granted(mock_event) @@ -96,15 +107,15 @@ def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails(harness assert harness.charm.upgrade.state == "failed" -def test_upgrade_granted_sets_failed_if_failed_snap(harness): +def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness): with ( patch( - "upgrade.KafkaUpgrade.zookeeper_current_version", + "events.upgrade.KafkaUpgrade.zookeeper_current_version", new_callable=PropertyMock, return_value="3.6", ), - patch.object(KafkaSnap, "stop_snap_service") as patched_stop, - patch.object(KafkaSnap, "install", return_value=False), + patch("workload.KafkaWorkload.stop") as patched_stop, + patch("workload.KafkaWorkload.install", return_value=False), ): mock_event = MagicMock() harness.charm.upgrade._on_upgrade_granted(mock_event) @@ -113,17 +124,16 @@ def test_upgrade_granted_sets_failed_if_failed_snap(harness): assert harness.charm.upgrade.state == "failed" -def test_upgrade_granted_sets_failed_if_failed_upgrade_check(harness): +def test_upgrade_granted_sets_failed_if_failed_upgrade_check(harness: Harness): with ( patch( - "upgrade.KafkaUpgrade.zookeeper_current_version", + "events.upgrade.KafkaUpgrade.zookeeper_current_version", new_callable=PropertyMock, return_value="3.6", ), - patch.object(KafkaSnap, "stop_snap_service"), - patch.object(KafkaSnap, "restart_snap_service") as patched_restart, - patch.object(KafkaSnap, "install", return_value=True), - patch("config.KafkaConfig.set_environment"), + patch("workload.KafkaWorkload.stop"), + patch("workload.KafkaWorkload.restart") as patched_restart, + patch("workload.KafkaWorkload.install", return_value=True), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=False), ): mock_event = MagicMock() 
@@ -133,17 +143,16 @@ def test_upgrade_granted_sets_failed_if_failed_upgrade_check(harness): assert harness.charm.upgrade.state == "failed" -def test_upgrade_granted_succeeds(harness): +def test_upgrade_granted_succeeds(harness: Harness): with ( patch( - "upgrade.KafkaUpgrade.zookeeper_current_version", + "events.upgrade.KafkaUpgrade.zookeeper_current_version", new_callable=PropertyMock, return_value="3.6", ), - patch("config.KafkaConfig.set_environment"), - patch.object(KafkaSnap, "stop_snap_service"), - patch.object(KafkaSnap, "restart_snap_service"), - patch.object(KafkaSnap, "install", return_value=True), + patch("workload.KafkaWorkload.stop"), + patch("workload.KafkaWorkload.restart"), + patch("workload.KafkaWorkload.install", return_value=True), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), ): mock_event = MagicMock() @@ -152,22 +161,21 @@ def test_upgrade_granted_succeeds(harness): assert harness.charm.upgrade.state == "completed" -def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness): +def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness): with harness.hooks_disabled(): harness.set_leader(True) with ( patch( - "upgrade.KafkaUpgrade.zookeeper_current_version", + "events.upgrade.KafkaUpgrade.zookeeper_current_version", new_callable=PropertyMock, return_value="3.6", ), - patch("config.KafkaConfig.set_environment"), - patch.object(KafkaSnap, "stop_snap_service"), - patch.object(KafkaSnap, "restart_snap_service"), - patch.object(KafkaSnap, "install", return_value=True), + patch("workload.KafkaWorkload.stop"), + patch("workload.KafkaWorkload.restart"), + patch("workload.KafkaWorkload.install", return_value=True), patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), - patch.object(KafkaUpgrade, "on_upgrade_changed") as patched_upgrade, + patch("events.upgrade.KafkaUpgrade.on_upgrade_changed") as patched_upgrade, ): mock_event = MagicMock() 
harness.charm.upgrade._on_upgrade_granted(mock_event) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py deleted file mode 100644 index 0c51404f..00000000 --- a/tests/unit/test_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. - -from unittest.mock import patch - -from src.utils import map_env - -from utils import get_env, update_env - - -def test_map_env_populated(): - example_env = [ - "KAFKA_OPTS=orcs -Djava=wargs -Dkafka=goblins", - "SERVER_JVMFLAGS=dwarves -Djava=elves -Dzookeeper=men", - ] - env = map_env(env=example_env) - - assert len(env) == 2 - assert sorted(env.keys()) == sorted(["KAFKA_OPTS", "SERVER_JVMFLAGS"]) - - for value in env.values(): - assert isinstance(value, str) - # checks handles multiple equals signs in value - assert len(value.split()) == 3 - - -def test_map_env_empty_item(): - # we get this after reading the default /etc/environment from a stock 22.04 because of safe_get_file, - # see: https://github.com/verterok/zookeeper-operator/blob/fix-invalid-etc-env/src/utils.py#L44 - example_env = [ - 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin"', - "", - ] - env = map_env(env=example_env) - - assert len(env) == 1 - assert sorted(env.keys()) == sorted(["PATH"]) - - for value in env.values(): - assert isinstance(value, str) - - -def test_get_env_empty(): - with patch("utils.safe_get_file", return_value=[]): - assert not get_env() - assert get_env() == {} - - -def test_update_env(): - example_get_env = { - "KAFKA_OPTS": "orcs -Djava=wargs -Dkafka=goblins", - "SERVER_JVMFLAGS": "dwarves -Djava=elves -Dzookeeper=men", - } - example_update_env = { - "SERVER_JVMFLAGS": "gimli -Djava=legolas -Dzookeeper=aragorn", - } - - with ( - patch("utils.get_env", return_value=example_get_env), - patch("utils.safe_write_to_file") as safe_write, - ): - update_env(env=example_update_env) - - assert all( 
- updated in safe_write.call_args.kwargs["content"] - for updated in ["gimli", "legolas", "aragorn"] - ) - assert "KAFKA_OPTS" in safe_write.call_args.kwargs["content"] - assert safe_write.call_args.kwargs["path"] == "/etc/environment" - assert safe_write.call_args.kwargs["mode"] == "w" diff --git a/tests/unit/test_snap.py b/tests/unit/test_workload.py similarity index 55% rename from tests/unit/test_snap.py rename to tests/unit/test_workload.py index adc6c391..774cb88d 100644 --- a/tests/unit/test_snap.py +++ b/tests/unit/test_workload.py @@ -2,29 +2,21 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -import subprocess from unittest.mock import mock_open, patch import pytest from charms.operator_libs_linux.v1.snap import SnapError -from snap import KafkaSnap +from workload import KafkaWorkload -def test_run_bin_command_raises(): - """Checks failed snap command raises CalledProcessError.""" - with pytest.raises(subprocess.CalledProcessError): - KafkaSnap.run_bin_command("stuff", ["to"], ["fail"]) - - -def test_run_bin_command_args(): +def test_run_bin_command_args(patched_exec): """Checks KAFKA_OPTS env-var and zk-tls flag present in all snap commands.""" - with patch("subprocess.check_output") as patched: - KafkaSnap.run_bin_command(bin_keyword="configs", bin_args=["--list"], opts=["-Djava"]) + KafkaWorkload().run_bin_command(bin_keyword="configs", bin_args=["--list"], opts=["-Djava"]) - assert "charmed-kafka.configs" in patched.call_args.args[0].split() - assert "-Djava" == patched.call_args.args[0].split()[0] - assert "--list" == patched.call_args.args[0].split()[-1] + assert "charmed-kafka.configs" in patched_exec.call_args.args[0].split() + assert "-Djava" == patched_exec.call_args.args[0].split()[0] + assert "--list" == patched_exec.call_args.args[0].split()[-1] def test_get_service_pid_raises(): @@ -38,7 +30,7 @@ def test_get_service_pid_raises(): patch("subprocess.check_output", return_value="123"), pytest.raises(SnapError), ): 
- KafkaSnap().get_service_pid() + KafkaWorkload().get_service_pid() def test_get_service_pid_raises_no_pid(): @@ -47,4 +39,4 @@ def test_get_service_pid_raises_no_pid(): patch("subprocess.check_output", return_value=""), pytest.raises(SnapError), ): - KafkaSnap().get_service_pid() + KafkaWorkload().get_service_pid() diff --git a/tox.ini b/tox.ini index c145f505..2a9d973d 100644 --- a/tox.ini +++ b/tox.ini @@ -39,17 +39,17 @@ deps = [testenv:format] description = Apply coding style standards to code commands = - poetry install + poetry install --no-root poetry export -f requirements.txt -o requirements.txt --without-hashes - poetry install --only fmt + poetry install --no-root --only fmt poetry run ruff --fix {[vars]all_path} poetry run black {[vars]all_path} [testenv:lint] description = Check code against coding style standards commands = - poetry install --only lint + poetry install --no-root --only lint poetry run codespell {tox_root} \ --skip {tox_root}/.git \ --skip {tox_root}/.tox \ @@ -65,13 +65,13 @@ commands = poetry run ruff {[vars]all_path} poetry run black --check --diff {[vars]all_path} - poetry install + poetry install --no-root poetry run pyright [testenv:unit] description = Run unit tests commands = - poetry install --with unit + poetry install --no-root --with unit poetry run coverage run --source={[vars]src_path} \ -m pytest -vv --tb native -s {posargs} {[vars]tests_path}/unit poetry run coverage report @@ -83,7 +83,7 @@ pass_env = CI CI_PACKED_CHARMS commands = - poetry install --with integration + poetry install --no-root --with integration poetry run pytest -vv --tb native --log-cli-level=INFO -s {posargs} {[vars]tests_path}/integration/ [testenv:integration-{charm,provider,scaling,password-rotation,tls,upgrade,ha}] @@ -96,5 +96,5 @@ pass_env = {[testenv]pass_env} CI commands = - poetry install --with integration + poetry install --no-root --with integration poetry run pytest -vv --tb native --log-cli-level=INFO -s {posargs} 
{[vars]tests_path}/integration/{env:TEST_FILE}