diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 0a18c2b7..541b3730 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -64,13 +64,14 @@ jobs:
           - integration-password-rotation
           - integration-tls
           - integration-upgrade
+          # - integration-balancer
     name: ${{ matrix.tox-environments }}
     needs:
       - lint
       - unit-test
       - build
     runs-on: ubuntu-latest
-    timeout-minutes: 120
+    timeout-minutes: 240
     steps:
       - name: Checkout
         uses: actions/checkout@v3
diff --git a/README.md b/README.md
index 6e22591b..28052f3b 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ The Charmed Kafka Operator delivers automated operations management from day 0 t
 The Kafka Operator can be found on [Charmhub](https://charmhub.io/kafka) and it comes with production-ready features such as:
 - Fault-tolerance, replication, scalability and high-availability out-of-the-box.
-- SASL/SCRAM auth for Broker-Broker and Client-Broker authenticaion enabled by default.
+- SASL/SCRAM auth for Broker-Broker and Client-Broker authentication enabled by default.
 - Access control management supported with user-provided ACL lists.
 
 The Kafka Operator uses the latest upstream Kafka binaries released by The Apache Software Foundation that comes with Kafka, made available using the [`charmed-kafka` snap ](https://snapcraft.io/charmed-kafka) distributed by Canonical.
diff --git a/actions.yaml b/actions.yaml
index 69d9670e..9cf312c1 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -18,7 +18,7 @@ set-password:
 set-tls-private-key:
   description: Sets the private key identifying the target unit, which will be used for certificate signing requests (CSR). When updated, certificates will be reissued to the unit.
-    Run for each unit separately. Requires a valid relation to an application providing the `certificates` relation interface. 
+    Run for each unit separately. Requires a valid relation to an application providing the `certificates` relation interface.
   params:
     internal-key:
       type: string
@@ -33,3 +33,23 @@ get-admin-credentials:
 
 pre-upgrade-check:
   description: Run necessary pre-upgrade checks before executing a charm upgrade.
+
+rebalance:
+  description: Trigger a rebalance of cluster partitions based on configured goals.
+  params:
+    mode:
+      type: string
+      description: The operation to issue to the balancer. This action must be called on the leader unit.
+        'full' - runs a full rebalance of all partitions across the whole cluster
+        'add' - evenly distributes replicas to new and available brokers
+        'remove' - moves under-replicated partition replicas assigned to decommissioned brokers, to available ones
+      enum: [full, add, remove]
+    dryrun:
+      description: Only generate the partition rebalance proposals and estimated result, without executing.
+      type: boolean
+      default: true
+    brokerid:
+      description: Broker ID newly added to the cluster or to be removed. The broker ID is the unit number, e.g. kafka/0 is broker 0.
+      type: integer
+      minimum: 0
+  required: [mode]
diff --git a/config.yaml b/config.yaml
index dfe20958..fb02269f 100644
--- a/config.yaml
+++ b/config.yaml
@@ -2,6 +2,12 @@
 # See LICENSE file for licensing details.
 
 options:
+  roles:
+    description: |
+      Comma-separated list of the roles assigned to the nodes of this cluster.
+      This configuration accepts the following roles: 'broker' (standard functionality), 'balancer' (cruise control).
+    type: string
+    default: broker
   compression_type:
    description: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
    type: string
@@ -24,7 +30,7 @@ options:
     default: "-1"
   log_retention_ms:
     description: The number of milliseconds to keep a log file before deleting it (in milliseconds).
-    type: string 
+    type: string
     default: "-1"
   log_segment_bytes:
     description: The maximum size of a single log file.
@@ -48,11 +54,11 @@ options:
     default: false
   log_cleaner_delete_retention_ms:
     description: How long are delete records retained.
-    type: string 
+    type: string
     default: "86400000"
   log_cleaner_min_compaction_lag_ms:
     description: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-    type: string 
+    type: string
     default: "0"
   log_cleanup_policy:
     description: "The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. Valid policies are: 'delete' and 'compact'"
@@ -79,14 +85,26 @@ options:
     type: string
     default: ""
   profile:
-    description: 'Profile representing the scope of deployment, and used to enable high-level customisation of sysconfigs, resource checks/allocation, warning levels, etc. Allowed values are: “production”, “staging” and “testing”'
-    type: string
-    default: production
+    description: "Profile representing the scope of deployment, and used to enable high-level customisation of sysconfigs, resource checks/allocation, warning levels, etc. Allowed values are: “production”, “staging” and “testing”"
+    type: string
+    default: production
   certificate_extra_sans:
-    description: Config options to add extra-sans to the ones used when requesting server certificates. The extra-sans are specified by comma-separated names to be added when requesting signed certificates. Use "{unit}" as a placeholder to be filled with the unit number, e.g. "worker-{unit}" will be translated as "worker-0" for unit 0 and "worker-1" for unit 1 when requesting the certificate.
-    type: string
-    default: ""
+    description: Config options to add extra-sans to the ones used when requesting server certificates. The extra-sans are specified by comma-separated names to be added when requesting signed certificates. Use "{unit}" as a placeholder to be filled with the unit number, e.g. "worker-{unit}" will be translated as "worker-0" for unit 0 and "worker-1" for unit 1 when requesting the certificate.
+    type: string
+    default: ""
   log_level:
-    description: 'Level of logging for the different components operated by the charm. Possible values: ERROR, WARNING, INFO, DEBUG'
+    description: "Level of logging for the different components operated by the charm. Possible values: ERROR, WARNING, INFO, DEBUG"
     type: string
     default: "INFO"
+  network_bandwidth:
+    description: The network bandwidth available for the cloud that the charm is deployed to, in KB.
+    type: int
+    default: 50000
+  cruisecontrol_balance_threshold:
+    description: The maximum allowed extent of unbalance between brokers for cpu, disk and network utilization, and replica counts. For example, a value of `1.1` ensures that no broker should have >1.1x average utilization of all the brokers.
+    type: float
+    default: 1.1
+  cruisecontrol_capacity_threshold:
+    description: The maximum percentage of the total cpu, disk and network capacity that is allowed to be used on a broker. For example, a value of `0.8` ensures that no broker should have >80% utilization.
+    type: float
+    default: 0.8
diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py
index 5cb309b1..a2162aa0 100644
--- a/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ b/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 35
+LIBPATCH = 38
 
 PYDEPS = ["ops>=2.0.0"]
 
@@ -642,8 +642,8 @@ def _move_to_new_label_if_needed(self):
             return
 
         # Create a new secret with the new label
-        old_meta = self._secret_meta
         content = self._secret_meta.get_content()
+        self._secret_uri = None
 
         # I wish we could just check if we are the owners of the secret...
         try:
@@ -651,13 +651,17 @@
         except ModelError as err:
             if "this unit is not the leader" not in str(err):
                 raise
-            old_meta.remove_all_revisions()
+        self.current_label = None
 
     def set_content(self, content: Dict[str, str]) -> None:
         """Setting cached secret content."""
         if not self.meta:
             return
 
+        # DPE-4182: do not create new revision if the content stays the same
+        if content == self.get_content():
+            return
+
         if content:
             self._move_to_new_label_if_needed()
             self.meta.set_content(content)
@@ -1586,7 +1590,7 @@ def _register_secret_to_relation(
         """
         label = self._generate_secret_label(relation_name, relation_id, group)
 
-        # Fetchin the Secret's meta information ensuring that it's locally getting registered with
+        # Fetching the Secret's meta information ensuring that it's locally getting registered with
         CachedSecret(self._model, self.component, label, secret_id).meta
 
     def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]):
@@ -2309,7 +2313,7 @@ def _secrets(self) -> dict:
         return self._cached_secrets
 
     def _get_secret(self, group) -> Optional[Dict[str, str]]:
-        """Retrieveing secrets."""
+        """Retrieving secrets."""
         if not self.app:
             return
 
         if not self._secrets.get(group):
@@ -2602,6 +2606,14 @@ def set_version(self, relation_id: int, version: str) -> None:
         """
         self.update_relation_data(relation_id, {"version": version})
 
+    def set_subordinated(self, relation_id: int) -> None:
+        """Raises the subordinated flag in the application relation databag.
+
+        Args:
+            relation_id: the identifier for a particular relation.
+        """
+        self.update_relation_data(relation_id, {"subordinated": "true"})
+
 
 class DatabaseProviderEventHandlers(EventHandlers):
     """Provider-side of the database relation handlers."""
@@ -2838,6 +2850,21 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None:
 
     def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
         """Event emitted when the database relation has changed."""
+        is_subordinate = False
+        remote_unit_data = None
+        for key in event.relation.data.keys():
+            if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name):
+                remote_unit_data = event.relation.data[key]
+            elif isinstance(key, Application) and key.name != self.charm.app.name:
+                is_subordinate = event.relation.data[key].get("subordinated") == "true"
+
+        if is_subordinate:
+            if not remote_unit_data:
+                return
+
+            if remote_unit_data.get("state") != "ready":
+                return
+
         # Check which data has changed to emit customs events.
         diff = self._diff(event)
diff --git a/lib/charms/data_platform_libs/v0/data_models.py b/lib/charms/data_platform_libs/v0/data_models.py
index 8bc879e3..a1dbb829 100644
--- a/lib/charms/data_platform_libs/v0/data_models.py
+++ b/lib/charms/data_platform_libs/v0/data_models.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Library to provide simple API for promoting typed, validated and structured dataclass in charms.
+r"""Library to provide simple API for promoting typed, validated and structured dataclass in charms.
 
 Dict-like data structure are often used in charms. They are used for config, action parameters
 and databag. This library aims at providing simple API for using pydantic BaseModel-derived class
@@ -168,15 +168,17 @@ class MergedDataBag(ProviderDataBag, RequirerDataBag):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 1
+LIBPATCH = 4
 
-PYDEPS = ["ops>=2.0.0", "pydantic>=1.10"]
+PYDEPS = ["ops>=2.0.0", "pydantic>=1.10,<2"]
 
 G = TypeVar("G")
 T = TypeVar("T", bound=BaseModel)
 AppModel = TypeVar("AppModel", bound=BaseModel)
 UnitModel = TypeVar("UnitModel", bound=BaseModel)
 
+DataBagNativeTypes = (int, str, float)
+
 
 class BaseConfigModel(BaseModel):
     """Class to be used for defining the structured configuration options."""
@@ -231,10 +233,15 @@ def write(relation_data: RelationDataContent, model: BaseModel):
         relation_data: pointer to the relation databag
         model: instance of pydantic model to be written
     """
-    for key, value in model.dict(exclude_none=True).items():
-        relation_data[key.replace("_", "-")] = (
-            str(value) if isinstance(value, str) or isinstance(value, int) else json.dumps(value)
-        )
+    for key, value in model.dict(exclude_none=False).items():
+        if value:
+            relation_data[key.replace("_", "-")] = (
+                str(value)
+                if any(isinstance(value, _type) for _type in DataBagNativeTypes)
+                else json.dumps(value)
+            )
+        else:
+            relation_data[key.replace("_", "-")] = ""
 
 
 def read(relation_data: MutableMapping[str, str], obj: Type[T]) -> T:
@@ -248,10 +255,11 @@ def read(relation_data: MutableMapping[str, str], obj: Type[T]) -> T:
         **{
             field_name: (
                 relation_data[parsed_key]
-                if field.type_ in [int, str, float]
+                if field.outer_type_ in DataBagNativeTypes
                 else json.loads(relation_data[parsed_key])
             )
             for field_name, field in obj.__fields__.items()
+            # pyright: ignore[reportGeneralTypeIssues]
             if (parsed_key := field_name.replace("_", "-")) in relation_data
             if relation_data[parsed_key]
         }
@@ -275,8 +283,8 @@ def decorator(
         [
             CharmBase,
             RelationEvent,
-            Union[AppModel, ValidationError],
-            Union[UnitModel, ValidationError],
+            Optional[Union[AppModel, ValidationError]],
+            Optional[Union[UnitModel, ValidationError]],
         ],
         G,
     ]
@@ -286,7 +294,7 @@ def event_wrapper(self: CharmBase, event: RelationEvent):
         try:
             app_data = (
                 read(event.relation.data[event.app], app_model)
-                if app_model is not None
+                if app_model is not None and event.app
                 else None
             )
         except pydantic.ValidationError as e:
@@ -295,7 +303,7 @@
         try:
             unit_data = (
                 read(event.relation.data[event.unit], unit_model)
-                if unit_model is not None
+                if unit_model is not None and event.unit
                 else None
             )
         except pydantic.ValidationError as e:
diff --git a/lib/charms/data_platform_libs/v0/upgrade.py b/lib/charms/data_platform_libs/v0/upgrade.py
index 0db6f63b..4d909d64 100644
--- a/lib/charms/data_platform_libs/v0/upgrade.py
+++ b/lib/charms/data_platform_libs/v0/upgrade.py
@@ -263,8 +263,9 @@ def restart(self, event) -> None:
 import json
 import logging
 from abc import ABC, abstractmethod
-from typing import List, Literal, Optional, Set, Tuple
+from typing import Dict, List, Literal, Optional, Set, Tuple
 
+import poetry.core.constraints.version as poetry_version
 from ops.charm import (
     ActionEvent,
     CharmBase,
@@ -284,199 +285,31 @@ def restart(self, event) -> None:
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 10
+LIBPATCH = 18
 
-PYDEPS = ["pydantic>=1.10,<2"]
+PYDEPS = ["pydantic>=1.10,<2", "poetry-core"]
 
 logger = logging.getLogger(__name__)
 
 # --- DEPENDENCY RESOLUTION FUNCTIONS ---
 
-def build_complete_sem_ver(version: str) -> list[int]:
-    """Builds complete major.minor.patch version from version string.
-
-    Returns:
-        List of major.minor.patch version integers
-    """
-    versions = [int(ver) if ver != "*" else 0 for ver in str(version).split(".")]
-
-    # padding with 0s until complete major.minor.patch
-    return (versions + 3 * [0])[:3]
-
-
-def verify_caret_requirements(version: str, requirement: str) -> bool:
-    """Verifies version requirements using carats.
-
-    Args:
-        version: the version currently in use
-        requirement: the requirement version
-
-    Returns:
-        True if `version` meets defined `requirement`. Otherwise False
-    """
-    if not requirement.startswith("^"):
-        return True
-
-    requirement = requirement[1:]
-
-    sem_version = build_complete_sem_ver(version)
-    sem_requirement = build_complete_sem_ver(requirement)
-
-    # caret uses first non-zero character, not enough to just count '.'
-    if sem_requirement[0] == 0:
-        max_version_index = requirement.count(".")
-        for i, semver in enumerate(sem_requirement):
-            if semver != 0:
-                max_version_index = i
-                break
-    else:
-        max_version_index = 0
-
-    for i in range(3):
-        # version higher than first non-zero
-        if (i <= max_version_index) and (sem_version[i] != sem_requirement[i]):
-            return False
-
-        # version either higher or lower than first non-zero
-        if (i > max_version_index) and (sem_version[i] < sem_requirement[i]):
-            return False
-
-    return True
-
-
-def verify_tilde_requirements(version: str, requirement: str) -> bool:
-    """Verifies version requirements using tildes.
-
-    Args:
-        version: the version currently in use
-        requirement: the requirement version
-
-    Returns:
-        True if `version` meets defined `requirement`. Otherwise False
-    """
-    if not requirement.startswith("~"):
-        return True
-
-    requirement = requirement[1:]
-
-    sem_version = build_complete_sem_ver(version)
-    sem_requirement = build_complete_sem_ver(requirement)
-
-    max_version_index = min(1, requirement.count("."))
-
-    for i in range(3):
-        # version higher before requirement level
-        if (i < max_version_index) and (sem_version[i] > sem_requirement[i]):
-            return False
-
-        # version either higher or lower at requirement level
-        if (i == max_version_index) and (sem_version[i] != sem_requirement[i]):
-            return False
-
-        # version lower after requirement level
-        if (i > max_version_index) and (sem_version[i] < sem_requirement[i]):
-            return False
-
-    # must be valid
-    return True
-
-
-def verify_wildcard_requirements(version: str, requirement: str) -> bool:
-    """Verifies version requirements using wildcards.
-
-    Args:
-        version: the version currently in use
-        requirement: the requirement version
-
-    Returns:
-        True if `version` meets defined `requirement`. Otherwise False
-    """
-    if "*" not in requirement:
-        return True
-
-    sem_version = build_complete_sem_ver(version)
-    sem_requirement = build_complete_sem_ver(requirement)
-
-    max_version_index = requirement.count(".")
-
-    for i in range(3):
-        # version not the same before wildcard
-        if (i < max_version_index) and (sem_version[i] != sem_requirement[i]):
-            return False
-
-        # version not higher after wildcard
-        if (i == max_version_index) and (sem_version[i] < sem_requirement[i]):
-            return False
-
-    # must be valid
-    return True
-
-
-def verify_inequality_requirements(version: str, requirement: str) -> bool:
-    """Verifies version requirements using inequalities.
-
-    Args:
-        version: the version currently in use
-        requirement: the requirement version
-
-    Returns:
-        True if `version` meets defined `requirement`. Otherwise False
-    """
-    if not any(char for char in [">", ">="] if requirement.startswith(char)):
-        return True
-
-    raw_requirement = requirement.replace(">", "").replace("=", "")
-
-    sem_version = build_complete_sem_ver(version)
-    sem_requirement = build_complete_sem_ver(raw_requirement)
-
-    max_version_index = raw_requirement.count(".") or 0
-
-    for i in range(3):
-        # valid at same requirement level
-        if (
-            (i == max_version_index)
-            and ("=" in requirement)
-            and (sem_version[i] == sem_requirement[i])
-        ):
-            return True
-
-        # version not increased at any point
-        if sem_version[i] < sem_requirement[i]:
-            return False
-
-        # valid
-        if sem_version[i] > sem_requirement[i]:
-            return True
-
-    # must not be valid
-    return False
-
-
 def verify_requirements(version: str, requirement: str) -> bool:
-    """Verifies a specified version against defined requirements.
+    """Verifies a specified version against defined constraint.
 
-    Supports caret (`^`), tilde (`~`), wildcard (`*`) and greater-than inequalities (`>`, `>=`)
+    Supports Poetry version constraints
+    https://python-poetry.org/docs/dependency-specification/#version-constraints
 
     Args:
         version: the version currently in use
-        requirement: the requirement version
+        requirement: Poetry version constraint
 
     Returns:
         True if `version` meets defined `requirement`. Otherwise False
     """
-    if not all(
-        [
-            verify_inequality_requirements(version=version, requirement=requirement),
-            verify_caret_requirements(version=version, requirement=requirement),
-            verify_tilde_requirements(version=version, requirement=requirement),
-            verify_wildcard_requirements(version=version, requirement=requirement),
-        ]
-    ):
-        return False
-
-    return True
+    return poetry_version.parse_constraint(requirement).allows(
+        poetry_version.Version.parse(version)
+    )
 
 
 # --- DEPENDENCY MODEL TYPES ---
@@ -513,7 +346,7 @@ class KafkaDependenciesModel(BaseModel):
         print(model.dict())  # exporting back validated deps
     """
 
-    dependencies: dict[str, str]
+    dependencies: Dict[str, str]
     name: str
     upgrade_supported: str
     version: str
@@ -521,19 +354,14 @@ class KafkaDependenciesModel(BaseModel):
     @validator("dependencies", "upgrade_supported", each_item=True)
     @classmethod
     def dependencies_validator(cls, value):
-        """Validates values with dependencies for multiple special characters."""
+        """Validates version constraint."""
         if isinstance(value, dict):
             deps = value.values()
         else:
             deps = [value]
 
-        chars = ["~", "^", ">", "*"]
-
         for dep in deps:
-            if (count := sum([dep.count(char) for char in chars])) != 1:
-                raise ValueError(
-                    f"Value uses greater than 1 special character (^ ~ > *). Found {count}."
-                )
+            poetry_version.parse_constraint(dep)
 
         return value
 
@@ -673,7 +501,7 @@ class DataUpgrade(Object, ABC):
 
     STATES = ["recovery", "failed", "idle", "ready", "upgrading", "completed"]
 
-    on = UpgradeEvents()  # pyright: ignore [reportGeneralTypeIssues]
+    on = UpgradeEvents()  # pyright: ignore [reportAssignmentType]
 
     def __init__(
         self,
@@ -778,6 +606,21 @@ def upgrade_stack(self, stack: List[int]) -> None:
         self.peer_relation.data[self.charm.app].update({"upgrade-stack": json.dumps(stack)})
         self._upgrade_stack = stack
 
+    @property
+    def other_unit_states(self) -> list:
+        """Current upgrade state for other units.
+
+        Returns:
+            Unsorted list of upgrade states for other units.
+        """
+        if not self.peer_relation:
+            return []
+
+        return [
+            self.peer_relation.data[unit].get("state", "")
+            for unit in list(self.peer_relation.units)
+        ]
+
     @property
     def unit_states(self) -> list:
         """Current upgrade state for all units.
@@ -1064,9 +907,24 @@ def _on_upgrade_charm(self, event: UpgradeCharmEvent) -> None:
                 logger.error(e)
                 self.set_unit_failed()
                 return
+            top_unit_id = self.upgrade_stack[-1]
+            top_unit = self.charm.model.get_unit(f"{self.charm.app.name}/{top_unit_id}")
+            if (
+                top_unit == self.charm.unit
+                and self.peer_relation.data[self.charm.unit].get("state") == "recovery"
+            ):
+                # While in a rollback and the Juju leader unit is the top unit in the upgrade stack, emit the event
+                # for this unit to start the rollback.
+                self.peer_relation.data[self.charm.unit].update({"state": "ready"})
+                self.on_upgrade_changed(event)
+                return
             self.charm.unit.status = WaitingStatus("other units upgrading first...")
             self.peer_relation.data[self.charm.unit].update({"state": "ready"})
 
+            if len(self.app_units) == 1:
+                # single unit upgrade, emit upgrade_granted event right away
+                getattr(self.on, "upgrade_granted").emit()
+        else:
 
             # for k8s run version checks only on highest ordinal unit
             if (
@@ -1093,9 +951,9 @@ def on_upgrade_changed(self, event: EventBase) -> None:
             logger.debug("Cluster failed to upgrade, exiting...")
             return
 
-        if self.cluster_state == "recovery":
-            logger.debug("Cluster in recovery, deferring...")
-            event.defer()
+        if self.substrate == "vm" and self.cluster_state == "recovery":
+            # skip run while in recovery. The event will be re-triggered when the cluster is ready
+            logger.debug("Cluster in recovery, skip...")
             return
 
         # if all units completed, mark as complete
@@ -1116,8 +974,7 @@ def on_upgrade_changed(self, event: EventBase) -> None:
                 logger.debug("upgrade-changed event handled before pre-checks, exiting...")
                 return
 
-            logger.debug("Did not find upgrade-stack or completed cluster state, deferring...")
-            event.defer()
+            logger.debug("Did not find upgrade-stack or completed cluster state, skipping...")
             return
 
         # upgrade ongoing, set status for waiting units
@@ -1147,6 +1004,7 @@ def on_upgrade_changed(self, event: EventBase) -> None:
             self.charm.unit == top_unit
             and top_state in ["ready", "upgrading"]
             and self.cluster_state == "ready"
+            and "upgrading" not in self.other_unit_states
         ):
             logger.debug(
                 f"{top_unit.name} is next to upgrade, emitting `upgrade_granted` event and upgrading..."
diff --git a/metadata.yaml b/metadata.yaml
index 08f564e0..0595f309 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -3,7 +3,7 @@
 name: kafka
 display-name: Charmed Kafka
 description: |
-  Kafka is an event streaming platform. This charm deploys and operates Kafka on 
+  Kafka is an event streaming platform. This charm deploys and operates Kafka on
summary: Charmed Kafka Operator docs: https://discourse.charmhub.io/t/charmed-kafka-documentation/10288 @@ -28,6 +28,15 @@ peers: upgrade: interface: upgrade +provides: + kafka-client: + interface: kafka_client + cos-agent: + interface: cos_agent + peer-cluster-orchestrator: + interface: peer_cluster + limit: 1 + requires: zookeeper: interface: zookeeper @@ -42,22 +51,19 @@ requires: trusted-certificate: interface: tls-certificates optional: true + peer-cluster: + interface: peer_cluster + optional: true oauth: interface: oauth limit: 1 optional: true -provides: - kafka-client: - interface: kafka_client - cos-agent: - interface: cos_agent - storage: data: type: filesystem description: Directories where the log data is stored - minimum-size: 10G + minimum-size: 1G location: /var/snap/charmed-kafka/common/var/lib/kafka multiple: range: 1- diff --git a/poetry.lock b/poetry.lock index da1bda01..41619a19 100644 --- a/poetry.lock +++ b/poetry.lock @@ -61,38 +61,38 @@ files = [ [[package]] name = "bcrypt" -version = "4.1.2" +version = "4.1.3" description = "Modern password hashing for your software and your servers" optional = false python-versions = ">=3.7" files = [ - {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"}, - {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"}, - {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"}, - {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = 
"sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, - {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, - {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, - {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, + {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, + {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, + {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, + {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, + {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, + {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, + {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, ] [package.extras] @@ -147,13 +147,13 @@ files = [ [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." 
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"},
-    {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"},
+    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
 ]
 
 [[package]]
@@ -335,13 +335,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
 
 [[package]]
 name = "codespell"
-version = "2.2.6"
+version = "2.3.0"
 description = "Codespell"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "codespell-2.2.6-py3-none-any.whl", hash = "sha256:9ee9a3e5df0990604013ac2a9f22fa8e57669c827124a2e961fe8a1da4cacc07"},
-    {file = "codespell-2.2.6.tar.gz", hash = "sha256:a8c65d8eb3faa03deabab6b3bbe798bea72e1799c7e9e955d57eca4096abcff9"},
+    {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
+    {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
 ]
 
 [package.extras]
@@ -363,13 +363,13 @@ files = [
 
 [[package]]
 name = "cosl"
-version = "0.0.11"
+version = "0.0.12"
 description = "Utils for COS Lite charms"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "cosl-0.0.11-py3-none-any.whl", hash = "sha256:46d78d6441ba628bae386cd8c10b8144558ab208115522020e7858f97837988d"},
-    {file = "cosl-0.0.11.tar.gz", hash = "sha256:15cac6ed20b65e9d33cda3c3da32e299c82f9feea64e393448cd3d3cf2bef32a"},
+    {file = "cosl-0.0.12-py3-none-any.whl", hash = "sha256:4efa647c251c0a5e53016833ccffbba3899c0a64f0a81ba0e8e8a5f8e080032a"},
+    {file = "cosl-0.0.12.tar.gz", hash = "sha256:6c6eefb3025dd49e526e99d09cde574a235ac6d0563e80c271d21cf50dd510bf"},
 ]
 
 [package.dependencies]
@@ -379,63 +379,63 @@ typing-extensions = "*"
 
 [[package]]
 name = "coverage"
-version = "7.5.0"
+version = "7.5.4"
 description = "Code coverage measurement for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c"},
-    {file = "coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b"},
-    {file = "coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932"},
-    {file = "coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3"},
-    {file = "coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517"},
-    {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a"},
-    {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880"},
-    {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58"},
-    {file = "coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4"},
-    {file = "coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a"},
-    {file = "coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375"},
-    {file = "coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb"},
-    {file = "coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95"},
-    {file = "coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d"},
-    {file = "coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743"},
-    {file = "coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1"},
-    {file = "coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de"},
-    {file = "coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff"},
-    {file = "coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d"},
-    {file = "coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656"},
-    {file = "coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9"},
-    {file = "coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64"},
-    {file = "coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af"},
-    {file = "coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc"},
-    {file = "coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2"},
-    {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1"},
-    {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb"},
-    {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2"},
-    {file = "coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4"},
-    {file = "coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475"},
-    {file = "coverage-7.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9dd88fce54abbdbf4c42fb1fea0e498973d07816f24c0e27a1ecaf91883ce69e"},
-    {file = "coverage-7.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a898c11dca8f8c97b467138004a30133974aacd572818c383596f8d5b2eb04a9"},
-    {file = "coverage-7.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07dfdd492d645eea1bd70fb1d6febdcf47db178b0d99161d8e4eed18e7f62fe7"},
-    {file = "coverage-7.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3d117890b6eee85887b1eed41eefe2e598ad6e40523d9f94c4c4b213258e4a4"},
-    {file = "coverage-7.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6afd2e84e7da40fe23ca588379f815fb6dbbb1b757c883935ed11647205111cb"},
-    {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9960dd1891b2ddf13a7fe45339cd59ecee3abb6b8326d8b932d0c5da208104f"},
-    {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ced268e82af993d7801a9db2dbc1d2322e786c5dc76295d8e89473d46c6b84d4"},
-    {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7c211f25777746d468d76f11719e64acb40eed410d81c26cefac641975beb88"},
-    {file = "coverage-7.5.0-cp38-cp38-win32.whl", hash = "sha256:262fffc1f6c1a26125d5d573e1ec379285a3723363f3bd9c83923c9593a2ac25"},
-    {file = "coverage-7.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:eed462b4541c540d63ab57b3fc69e7d8c84d5957668854ee4e408b50e92ce26a"},
-    {file = "coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1"},
-    {file = "coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5"},
-    {file = "coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631"},
-    {file = "coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46"},
-    {file = "coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e"},
-    {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be"},
-    {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b"},
-    {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0"},
-    {file = "coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7"},
-    {file = "coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493"},
-    {file = "coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067"},
-    {file = "coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8"},
+    {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"},
+    {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"},
+    {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"},
+    {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"},
+    {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"},
+    {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"},
+    {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"},
+    {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"},
+    {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"},
+    {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"},
+    {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"},
+    {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"},
+    {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"},
+    {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"},
+    {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"},
+    {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"},
+    {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"},
+    {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"},
+    {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"},
+    {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"},
+    {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"},
+    {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"},
+    {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"},
+    {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"},
+    {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"},
+    {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"},
+    {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"},
+    {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"},
+    {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"},
+    {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"},
+    {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"},
+    {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"},
+    {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"},
+    {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"},
+    {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"},
+    {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"},
+    {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"},
+    {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"},
+    {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"},
+    {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"},
+    {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"},
+    {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"},
+    {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"},
+    {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"},
+    {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"},
+    {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"},
+    {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"},
+    {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"},
+    {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"},
+    {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"},
+    {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"},
+    {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"},
 ]
 
 [package.dependencies]
@@ -446,43 +446,43 @@ toml = ["tomli"]
 
 [[package]]
 name = "cryptography"
-version = "42.0.5"
+version = "42.0.8"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"},
-    {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"},
-    {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"},
-    {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"},
-    {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"},
-    {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"},
-    {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"},
-    {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"},
-    {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"},
-    {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"},
-    {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"},
-    {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"},
-    {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"},
-    {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"},
-    {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"},
-    {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"},
-    {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"},
-    {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"},
-    {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"},
-    {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"},
-    {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"},
-    {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"},
-    {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"},
-    {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"},
-    {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"},
-    {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"},
-    {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"},
-    {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"},
-    {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"},
-    {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"},
-    {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"},
-    {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"},
+    {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"},
+    {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"},
+    {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"},
+    {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"},
+    {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"},
+    {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"},
 ]
 
 [package.dependencies]
@@ -539,13 +539,13 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth
 
 [[package]]
 name = "google-auth"
-version = "2.29.0"
+version = "2.31.0"
 description = "Google Authentication Library"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"},
-    {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"},
+    {file = "google-auth-2.31.0.tar.gz", hash = "sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871"},
+    {file = "google_auth-2.31.0-py2.py3-none-any.whl", hash = "sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23"},
 ]
 
 [package.dependencies]
@@ -562,13 +562,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
 
 [[package]]
 name = "hvac"
-version = "2.2.0"
+version = "2.3.0"
 description = "HashiCorp Vault API client"
 optional = false
 python-versions = "<4.0,>=3.8"
 files = [
-    {file = "hvac-2.2.0-py3-none-any.whl", hash = "sha256:f287a19940c6fc518c723f8276cc9927f7400734303ee5872ac2e84539466d8d"},
-    {file = "hvac-2.2.0.tar.gz", hash = "sha256:e4b0248c5672cb9a6f5974e7c8f5271a09c6c663cbf8ab11733a227f3d2db2c2"},
+    {file = "hvac-2.3.0-py3-none-any.whl", hash = "sha256:a3afc5710760b6ee9b3571769df87a0333da45da05a5f9f963e1d3925a84be7d"},
+    {file = "hvac-2.3.0.tar.gz", hash = "sha256:1b85e3320e8642dd82f234db63253cda169a817589e823713dc5fca83119b1e2"},
 ]
 
 [package.dependencies]
@@ -693,13 +693,13 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
 
 [[package]]
 name = "jinja2"
-version = "3.1.3"
+version = "3.1.4"
 description = "A very fast and expressive template engine."
optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, - {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [package.dependencies] @@ -748,12 +748,12 @@ referencing = ">=0.31.0" [[package]] name = "juju" -version = "3.4.0.0" +version = "3.5.0.0" description = "Python library for Juju" optional = false python-versions = "*" files = [ - {file = "juju-3.4.0.0.tar.gz", hash = "sha256:5b883446ca0977c1255b0876ed5d2eab01cffaf03a8c77cfd768975264abef3d"}, + {file = "juju-3.5.0.0.tar.gz", hash = "sha256:c69fbe63cb12991690787ce3d70812390bf3ca62b6c5e9ef15df00c1f03dd7e6"}, ] [package.dependencies] @@ -806,13 +806,13 @@ typing = ["mypy (>=0.991)"] [[package]] name = "kubernetes" -version = "29.0.0" +version = "30.1.0" description = "Kubernetes python client" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = "sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"}, - {file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"}, + {file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"}, + {file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"}, ] [package.dependencies] @@ -945,18 +945,15 @@ files = [ [[package]] name = "nodeenv" -version = "1.8.0" +version = "1.9.1" description = "Node.js virtual environment builder" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, - {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] -[package.dependencies] -setuptools = "*" - [[package]] name = "oauthlib" version = "3.2.2" @@ -975,13 +972,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "ops" -version = "2.13.0" +version = "2.14.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" files = [ - {file = "ops-2.13.0-py3-none-any.whl", hash = "sha256:edebef03841d727a9b8bd9ee3f52c5b94070fd748641a0927b51f6fe3a887365"}, - {file = "ops-2.13.0.tar.gz", hash = "sha256:106deec8c18a6dbf7fa3e6fe6e288784b1da8cb626b5265f6c4b959e10877272"}, + {file = "ops-2.14.1-py3-none-any.whl", hash = "sha256:2ae45bf2442a0c814d1abffa25b103097088582b4fba4ea2c1d313828e278948"}, + {file = "ops-2.14.1.tar.gz", hash = "sha256:2fc5b6aa63efb71b510a946f764c9321acec5e30b9ddc64ce88c6cd4f753a19c"}, ] [package.dependencies] @@ -991,15 +988,30 @@ websocket-client = "==1.*" [package.extras] docs = 
["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (==6.2.1)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] +[[package]] +name = "ops-scenario" +version = "6.1.1" +description = "Python library providing a state-transition testing API for Operator Framework charms." +optional = false +python-versions = ">=3.8" +files = [ + {file = "ops_scenario-6.1.1-py3-none-any.whl", hash = "sha256:358b321c8091b1c1e2ec7df1014c6052416b9bf4d6259ea8f35bc90caa727340"}, + {file = "ops_scenario-6.1.1.tar.gz", hash = "sha256:55d218cd4ea08afcdb2fc9e41a3364a3c1541e3a7c93ab14b2b7fc96e51a91de"}, +] + +[package.dependencies] +ops = ">=2.10" +PyYAML = ">=6.0.1" + [[package]] name = "packaging" -version = "24.0" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -1087,13 +1099,13 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.1" +version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, - {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] @@ -1118,13 +1130,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "prompt-toolkit" -version = "3.0.43" +version = "3.0.47" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, - {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, + {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, + {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, ] [package.dependencies] @@ -1132,22 +1144,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "5.26.1" +version = "5.27.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.26.1-cp310-abi3-win32.whl", hash = "sha256:3c388ea6ddfe735f8cf69e3f7dc7611e73107b60bdfcf5d0f024c3ccd3794e23"}, - {file = "protobuf-5.26.1-cp310-abi3-win_amd64.whl", hash = 
"sha256:e6039957449cb918f331d32ffafa8eb9255769c96aa0560d9a5bf0b4e00a2a33"}, - {file = "protobuf-5.26.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:38aa5f535721d5bb99861166c445c4105c4e285c765fbb2ac10f116e32dcd46d"}, - {file = "protobuf-5.26.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:fbfe61e7ee8c1860855696e3ac6cfd1b01af5498facc6834fcc345c9684fb2ca"}, - {file = "protobuf-5.26.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:f7417703f841167e5a27d48be13389d52ad705ec09eade63dfc3180a959215d7"}, - {file = "protobuf-5.26.1-cp38-cp38-win32.whl", hash = "sha256:d693d2504ca96750d92d9de8a103102dd648fda04540495535f0fec7577ed8fc"}, - {file = "protobuf-5.26.1-cp38-cp38-win_amd64.whl", hash = "sha256:9b557c317ebe6836835ec4ef74ec3e994ad0894ea424314ad3552bc6e8835b4e"}, - {file = "protobuf-5.26.1-cp39-cp39-win32.whl", hash = "sha256:b9ba3ca83c2e31219ffbeb9d76b63aad35a3eb1544170c55336993d7a18ae72c"}, - {file = "protobuf-5.26.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ee014c2c87582e101d6b54260af03b6596728505c79f17c8586e7523aaa8f8c"}, - {file = "protobuf-5.26.1-py3-none-any.whl", hash = "sha256:da612f2720c0183417194eeaa2523215c4fcc1a1949772dc65f05047e08d5932"}, - {file = "protobuf-5.26.1.tar.gz", hash = "sha256:8ca2a1d97c290ec7b16e4e5dff2e5ae150cc1582f55b5ab300d45cb0dfa90e51"}, + {file = "protobuf-5.27.2-cp310-abi3-win32.whl", hash = "sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38"}, + {file = "protobuf-5.27.2-cp310-abi3-win_amd64.whl", hash = "sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505"}, + {file = "protobuf-5.27.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e"}, + {file = "protobuf-5.27.2-cp38-cp38-win32.whl", hash = "sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863"}, + {file = "protobuf-5.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6"}, + {file = "protobuf-5.27.2-cp39-cp39-win32.whl", hash = "sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca"}, + {file = "protobuf-5.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce"}, + {file = "protobuf-5.27.2-py3-none-any.whl", hash = "sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470"}, + {file = "protobuf-5.27.2.tar.gz", hash = "sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714"}, ] [[package]] @@ -1227,47 +1239,54 @@ files = [ [[package]] name = "pydantic" -version = "1.10.15" +version = "1.10.17" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, - {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, - {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"}, - {file = 
"pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"}, - {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"}, - {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"}, - {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"}, - {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"}, - {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"}, - {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"}, - {file = "pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"}, - {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"}, - {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"}, - {file = 
"pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"}, - {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"}, - {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"}, - {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"}, - {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"}, + {file = "pydantic-1.10.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fa51175313cc30097660b10eec8ca55ed08bfa07acbfe02f7a42f6c242e9a4b"}, + {file = "pydantic-1.10.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7e8988bb16988890c985bd2093df9dd731bfb9d5e0860db054c23034fab8f7a"}, + {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:371dcf1831f87c9e217e2b6a0c66842879a14873114ebb9d0861ab22e3b5bb1e"}, + {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4866a1579c0c3ca2c40575398a24d805d4db6cb353ee74df75ddeee3c657f9a7"}, + {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:543da3c6914795b37785703ffc74ba4d660418620cc273490d42c53949eeeca6"}, + {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7623b59876f49e61c2e283551cc3647616d2fbdc0b4d36d3d638aae8547ea681"}, + {file = "pydantic-1.10.17-cp310-cp310-win_amd64.whl", hash = "sha256:409b2b36d7d7d19cd8310b97a4ce6b1755ef8bd45b9a2ec5ec2b124db0a0d8f3"}, + {file = "pydantic-1.10.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa43f362b46741df8f201bf3e7dff3569fa92069bcc7b4a740dea3602e27ab7a"}, + {file = "pydantic-1.10.17-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a72d2a5ff86a3075ed81ca031eac86923d44bc5d42e719d585a8eb547bf0c9b"}, + {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ad32aed3bf5eea5ca5decc3d1bbc3d0ec5d4fbcd72a03cdad849458decbc63"}, + {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb4e741782e236ee7dc1fb11ad94dc56aabaf02d21df0e79e0c21fe07c95741"}, + {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d2f89a719411cb234105735a520b7c077158a81e0fe1cb05a79c01fc5eb59d3c"}, + {file = 
"pydantic-1.10.17-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db3b48d9283d80a314f7a682f7acae8422386de659fffaba454b77a083c3937d"}, + {file = "pydantic-1.10.17-cp311-cp311-win_amd64.whl", hash = "sha256:9c803a5113cfab7bbb912f75faa4fc1e4acff43e452c82560349fff64f852e1b"}, + {file = "pydantic-1.10.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:820ae12a390c9cbb26bb44913c87fa2ff431a029a785642c1ff11fed0a095fcb"}, + {file = "pydantic-1.10.17-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c1e51d1af306641b7d1574d6d3307eaa10a4991542ca324f0feb134fee259815"}, + {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e53fb834aae96e7b0dadd6e92c66e7dd9cdf08965340ed04c16813102a47fab"}, + {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e2495309b1266e81d259a570dd199916ff34f7f51f1b549a0d37a6d9b17b4dc"}, + {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:098ad8de840c92ea586bf8efd9e2e90c6339d33ab5c1cfbb85be66e4ecf8213f"}, + {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:525bbef620dac93c430d5d6bdbc91bdb5521698d434adf4434a7ef6ffd5c4b7f"}, + {file = "pydantic-1.10.17-cp312-cp312-win_amd64.whl", hash = "sha256:6654028d1144df451e1da69a670083c27117d493f16cf83da81e1e50edce72ad"}, + {file = "pydantic-1.10.17-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c87cedb4680d1614f1d59d13fea353faf3afd41ba5c906a266f3f2e8c245d655"}, + {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11289fa895bcbc8f18704efa1d8020bb9a86314da435348f59745473eb042e6b"}, + {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94833612d6fd18b57c359a127cbfd932d9150c1b72fea7c86ab58c2a77edd7c7"}, + {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d4ecb515fa7cb0e46e163ecd9d52f9147ba57bc3633dca0e586cdb7a232db9e3"}, + {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7017971ffa7fd7808146880aa41b266e06c1e6e12261768a28b8b41ba55c8076"}, + {file = "pydantic-1.10.17-cp37-cp37m-win_amd64.whl", hash = "sha256:e840e6b2026920fc3f250ea8ebfdedf6ea7a25b77bf04c6576178e681942ae0f"}, + {file = "pydantic-1.10.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bfbb18b616abc4df70591b8c1ff1b3eabd234ddcddb86b7cac82657ab9017e33"}, + {file = "pydantic-1.10.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb249096d873593e014535ab07145498957091aa6ae92759a32d40cb9998e2e"}, + {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c209af63ccd7b22fba94b9024e8b7fd07feffee0001efae50dd99316b27768"}, + {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b40c9e13a0b61583e5599e7950490c700297b4a375b55b2b592774332798b7"}, + {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c31d281c7485223caf6474fc2b7cf21456289dbaa31401844069b77160cab9c7"}, + {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae5184e99a060a5c80010a2d53c99aee76a3b0ad683d493e5f0620b5d86eeb75"}, + {file = "pydantic-1.10.17-cp38-cp38-win_amd64.whl", hash = "sha256:ad1e33dc6b9787a6f0f3fd132859aa75626528b49cc1f9e429cdacb2608ad5f0"}, + {file = "pydantic-1.10.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:7e17c0ee7192e54a10943f245dc79e36d9fe282418ea05b886e1c666063a7b54"}, + {file = "pydantic-1.10.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cafb9c938f61d1b182dfc7d44a7021326547b7b9cf695db5b68ec7b590214773"}, + {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ef534e3c22e5abbdbdd6f66b6ea9dac3ca3e34c5c632894f8625d13d084cbe"}, + {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d96b8799ae3d782df7ec9615cb59fc32c32e1ed6afa1b231b0595f6516e8ab"}, + {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab2f976336808fd5d539fdc26eb51f9aafc1f4b638e212ef6b6f05e753c8011d"}, + {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8ad363330557beac73159acfbeed220d5f1bfcd6b930302a987a375e02f74fd"}, + {file = "pydantic-1.10.17-cp39-cp39-win_amd64.whl", hash = "sha256:48db882e48575ce4b39659558b2f9f37c25b8d348e37a2b4e32971dd5a7d6227"}, + {file = "pydantic-1.10.17-py3-none-any.whl", hash = "sha256:e41b5b973e5c64f674b3b4720286ded184dcc26a691dd55f34391c62c6934688"}, + {file = "pydantic-1.10.17.tar.gz", hash = "sha256:f434160fb14b353caf634149baaf847206406471ba70e64657c1e8330277a991"}, ] [package.dependencies] @@ -1279,17 +1298,16 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygments" -version = "2.17.2" +version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [package.extras] -plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] [[package]] @@ -1349,13 +1367,13 @@ pytz = "*" [[package]] name = "pyright" -version = "1.1.360" +version = "1.1.370" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.360-py3-none-any.whl", hash = "sha256:7637f75451ac968b7cf1f8c51cfefb6d60ac7d086eb845364bc8ac03a026efd7"}, - {file = "pyright-1.1.360.tar.gz", hash = "sha256:784ddcda9745e9f5610483d7b963e9aa8d4f50d7755a9dffb28ccbeb27adce32"}, + {file = "pyright-1.1.370-py3-none-any.whl", hash = "sha256:fc721601e480a69989775bfc210534a6ca0110ebd0c065244a8d3a151294fc61"}, + {file = "pyright-1.1.370.tar.gz", hash = "sha256:d0d559d506fc41e3297f721aaa05a1b9f06beda5acc9ac64ca371ce94c28f960"}, ] [package.dependencies] @@ -1367,13 +1385,13 @@ dev = ["twine (>=3.4.1)"] [[package]] name = "pytest" -version = "8.2.0" +version = "8.2.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, - {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash 
= "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, ] [package.dependencies] @@ -1546,13 +1564,13 @@ files = [ [[package]] name = "referencing" -version = "0.35.0" +version = "0.35.1" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.35.0-py3-none-any.whl", hash = "sha256:8080727b30e364e5783152903672df9b6b091c926a146a759080b62ca3126cd6"}, - {file = "referencing-0.35.0.tar.gz", hash = "sha256:191e936b0c696d0af17ad7430a3dc68e88bc11be6514f4757dc890f04ab05889"}, + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, ] [package.dependencies] @@ -1561,13 +1579,13 @@ rpds-py = ">=0.7.0" [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -1600,110 +1618,110 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] [[package]] name = "rpds-py" -version = "0.18.0" +version = "0.18.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, - {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, - {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, - {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, - {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, - {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, - {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, - {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, - {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, - {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, - {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, - {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, - {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, - {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, - {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, - {file = 
"rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, - {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, - {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, - {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, - {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, - {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, - {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, - {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, - {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, - {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, - {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, - {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, - {file = 
"rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, - {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, - {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, - {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, - {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, - {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, - {file = 
"rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, - {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, - {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, - {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, + {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, + {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, + {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, + {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, + {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, + {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, + {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, + {file 
= "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, + {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, + {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, + {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, + {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, 
+ {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, + {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, + {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, + {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, + {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, + {file = 
"rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, + {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, ] [[package]] @@ -1722,46 +1740,31 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.4.2" +version = "0.5.1" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.4.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:8d14dc8953f8af7e003a485ef560bbefa5f8cc1ad994eebb5b12136049bbccc5"}, - {file = "ruff-0.4.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:24016ed18db3dc9786af103ff49c03bdf408ea253f3cb9e3638f39ac9cf2d483"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2e06459042ac841ed510196c350ba35a9b24a643e23db60d79b2db92af0c2b"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3afabaf7ba8e9c485a14ad8f4122feff6b2b93cc53cd4dad2fd24ae35112d5c5"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:799eb468ea6bc54b95527143a4ceaf970d5aa3613050c6cff54c85fda3fde480"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ec4ba9436a51527fb6931a8839af4c36a5481f8c19e8f5e42c2f7ad3a49f5069"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6a2243f8f434e487c2a010c7252150b1fdf019035130f41b77626f5655c9ca22"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8772130a063f3eebdf7095da00c0b9898bd1774c43b336272c3e98667d4fb8fa"}, - {file = "ruff-0.4.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab165ef5d72392b4ebb85a8b0fbd321f69832a632e07a74794c0e598e7a8376"}, - {file = "ruff-0.4.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1f32cadf44c2020e75e0c56c3408ed1d32c024766bd41aedef92aa3ca28eef68"}, - {file = "ruff-0.4.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:22e306bf15e09af45ca812bc42fa59b628646fa7c26072555f278994890bc7ac"}, - {file = "ruff-0.4.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82986bb77ad83a1719c90b9528a9dd663c9206f7c0ab69282af8223566a0c34e"}, - {file = "ruff-0.4.2-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:652e4ba553e421a6dc2a6d4868bc3b3881311702633eb3672f9f244ded8908cd"}, - {file = "ruff-0.4.2-py3-none-win32.whl", hash = "sha256:7891ee376770ac094da3ad40c116258a381b86c7352552788377c6eb16d784fe"}, - {file = "ruff-0.4.2-py3-none-win_amd64.whl", hash = "sha256:5ec481661fb2fd88a5d6cf1f83403d388ec90f9daaa36e40e2c003de66751798"}, - {file = "ruff-0.4.2-py3-none-win_arm64.whl", hash = "sha256:cbd1e87c71bca14792948c4ccb51ee61c3296e164019d2d484f3eaa2d360dfaf"}, - {file = "ruff-0.4.2.tar.gz", hash = "sha256:33bcc160aee2520664bc0859cfeaebc84bb7323becff3f303b8f1f2d81cb4edc"}, -] - -[[package]] -name = "setuptools" -version = "69.5.1" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, - {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, + {file = "ruff-0.5.1-py3-none-linux_armv6l.whl", hash = "sha256:6ecf968fcf94d942d42b700af18ede94b07521bd188aaf2cd7bc898dd8cb63b6"}, + {file = "ruff-0.5.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:204fb0a472f00f2e6280a7c8c7c066e11e20e23a37557d63045bf27a616ba61c"}, + {file = "ruff-0.5.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d235968460e8758d1e1297e1de59a38d94102f60cafb4d5382033c324404ee9d"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38beace10b8d5f9b6bdc91619310af6d63dd2019f3fb2d17a2da26360d7962fa"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e478d2f09cf06add143cf8c4540ef77b6599191e0c50ed976582f06e588c994"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0368d765eec8247b8550251c49ebb20554cc4e812f383ff9f5bf0d5d94190b0"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3a9a9a1b582e37669b0138b7c1d9d60b9edac880b80eb2baba6d0e566bdeca4d"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdd9f723e16003623423affabcc0a807a66552ee6a29f90eddad87a40c750b78"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:be9fd62c1e99539da05fcdc1e90d20f74aec1b7a1613463ed77870057cd6bd96"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e216fc75a80ea1fbd96af94a6233d90190d5b65cc3d5dfacf2bd48c3e067d3e1"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c4c2112e9883a40967827d5c24803525145e7dab315497fae149764979ac7929"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dfaf11c8a116394da3b65cd4b36de30d8552fa45b8119b9ef5ca6638ab964fa3"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d7ceb9b2fe700ee09a0c6b192c5ef03c56eb82a0514218d8ff700f6ade004108"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:bac6288e82f6296f82ed5285f597713acb2a6ae26618ffc6b429c597b392535c"}, + {file = "ruff-0.5.1-py3-none-win32.whl", hash = "sha256:5c441d9c24ec09e1cb190a04535c5379b36b73c4bc20aa180c54812c27d1cca4"}, + {file = "ruff-0.5.1-py3-none-win_amd64.whl", hash = "sha256:b1789bf2cd3d1b5a7d38397cac1398ddf3ad7f73f4de01b1e913e2abc7dfc51d"}, + {file = "ruff-0.5.1-py3-none-win_arm64.whl", hash = "sha256:2875b7596a740cbbd492f32d24be73e545a4ce0a3daf51e4f4e609962bfd3cd2"}, + {file = 
"ruff-0.5.1.tar.gz", hash = "sha256:3164488aebd89b1745b47fd00604fb4358d774465f20d1fcd907f9c0fc1b0655"}, ] -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - [[package]] name = "six" version = "1.16.0" @@ -1794,17 +1797,18 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] [[package]] name = "tenacity" -version = "8.2.3" +version = "8.5.0" description = "Retry code until it succeeds" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, ] [package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tomli" @@ -1845,13 +1849,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -1871,13 +1875,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -1996,20 +2000,20 @@ files = [ [[package]] name = "zipp" -version = "3.18.1" +version = "3.19.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, - {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, + {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, + {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [metadata] lock-version = "2.0" python-versions = ">=3.8,<4.0" -content-hash = "1f091d9340b0d96c5645eded33fa2e5b4edfe796def64b05a3becc41aa6a091d" +content-hash = "368c2c0c0639c94cf8e30bf951aad54342799d569e33651e9defa8defc57aa1c" diff --git a/pyproject.toml b/pyproject.toml index c664ed7d..e4476f1a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ show_missing = true minversion = "6.0" log_cli_level = "INFO" asyncio_mode = "auto" -markers = ["unstable"] +markers = ["unstable", "broker", "balancer"] # Formatting tools configuration [tool.black] @@ -41,8 +41,9 @@ tenacity = ">=8.0.1" pure-sasl = ">=0.6.2" jsonschema = ">=4.10" cryptography = ">42.0.0" -pydantic ="^1.10.7" +pydantic ="<2" pyyaml = "^6.0.1" +requests = "^2.32.3" [tool.poetry.group.fmt] optional = true @@ -68,6 +69,7 @@ optional = true pytest = ">=7.2" coverage = {extras = ["toml"], version = ">7.0"} pytest-mock = "^3.11.1" +ops-scenario = "^6.0.0" [tool.poetry.group.integration] optional = true diff --git a/requirements.txt b/requirements.txt index eb65604f..03046bcb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,25 @@ attrs==23.2.0 ; python_version >= "3.8" and python_version < "4.0" +certifi==2024.7.4 ; python_version >= "3.8" and python_version < "4.0" cffi==1.16.0 ; python_version >= "3.8" and python_version < "4.0" and platform_python_implementation != "PyPy" -cosl==0.0.11 ; python_version >= "3.8" and python_version < "4.0" -cryptography==42.0.5 ; python_version >= "3.8" and python_version < 
"4.0" +charset-normalizer==3.3.2 ; python_version >= "3.8" and python_version < "4.0" +cosl==0.0.12 ; python_version >= "3.8" and python_version < "4.0" +cryptography==42.0.8 ; python_version >= "3.8" and python_version < "4.0" +idna==3.7 ; python_version >= "3.8" and python_version < "4.0" importlib-resources==6.4.0 ; python_version >= "3.8" and python_version < "3.9" jsonschema-specifications==2023.12.1 ; python_version >= "3.8" and python_version < "4.0" jsonschema==4.22.0 ; python_version >= "3.8" and python_version < "4.0" kazoo==2.10.0 ; python_version >= "3.8" and python_version < "4.0" -ops==2.13.0 ; python_version >= "3.8" and python_version < "4.0" +ops==2.14.1 ; python_version >= "3.8" and python_version < "4.0" pkgutil-resolve-name==1.3.10 ; python_version >= "3.8" and python_version < "3.9" pure-sasl==0.6.2 ; python_version >= "3.8" and python_version < "4.0" pycparser==2.22 ; python_version >= "3.8" and python_version < "4.0" and platform_python_implementation != "PyPy" -pydantic==1.10.15 ; python_version >= "3.8" and python_version < "4.0" +pydantic==1.10.17 ; python_version >= "3.8" and python_version < "4.0" pyyaml==6.0.1 ; python_version >= "3.8" and python_version < "4.0" -referencing==0.35.0 ; python_version >= "3.8" and python_version < "4.0" -rpds-py==0.18.0 ; python_version >= "3.8" and python_version < "4.0" -tenacity==8.2.3 ; python_version >= "3.8" and python_version < "4.0" -typing-extensions==4.11.0 ; python_version >= "3.8" and python_version < "4.0" +referencing==0.35.1 ; python_version >= "3.8" and python_version < "4.0" +requests==2.32.3 ; python_version >= "3.8" and python_version < "4.0" +rpds-py==0.18.1 ; python_version >= "3.8" and python_version < "4.0" +tenacity==8.5.0 ; python_version >= "3.8" and python_version < "4.0" +typing-extensions==4.12.2 ; python_version >= "3.8" and python_version < "4.0" +urllib3==2.2.2 ; python_version >= "3.8" and python_version < "4.0" websocket-client==1.8.0 ; python_version >= "3.8" and python_version < "4.0" -zipp==3.18.1 ; python_version >= "3.8" and python_version < "3.9" +zipp==3.19.2 ; python_version >= "3.8" and python_version < "3.9" diff --git a/src/charm.py b/src/charm.py index 54794193..77f0f3c3 100755 --- a/src/charm.py +++ b/src/charm.py @@ -10,49 +10,31 @@ from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_agent.v0.cos_agent import COSAgentProvider from charms.operator_libs_linux.v0 import sysctl -from charms.operator_libs_linux.v1.snap import SnapError from charms.rolling_ops.v0.rollingops import RollingOpsManager, RunWithLock from ops import ( - ActiveStatus, EventBase, - SecretChangedEvent, - StartEvent, StatusBase, - StorageAttachedEvent, - StorageDetachingEvent, - StorageEvent, - UpdateStatusEvent, ) from ops.main import main from core.cluster import ClusterState from core.models import Substrates from core.structured_config import CharmConfig -from events.oauth import OAuthHandler -from events.password_actions import PasswordActionEvents -from events.provider import KafkaProvider +from events.balancer import BalancerOperator +from events.broker import BrokerOperator +from events.peer_cluster import PeerClusterEventsHandler from events.tls import TLSHandler -from events.upgrade import KafkaDependencyModel, KafkaUpgrade -from events.zookeeper import ZooKeeperHandler -from health import KafkaHealth from literals import ( CHARM_KEY, - DEPENDENCIES, - GROUP, + JMX_CC_PORT, JMX_EXPORTER_PORT, LOGS_RULES_DIR, METRICS_RULES_DIR, OS_REQUIREMENTS, - PEER, - REL_NAME, 
SUBSTRATE, - USER, DebugLevel, Status, ) -from managers.auth import AuthManager -from managers.config import ConfigManager -from managers.tls import TLSManager from workload import KafkaWorkload logger = logging.getLogger(__name__) @@ -67,328 +49,119 @@ def __init__(self, *args): super().__init__(*args) self.name = CHARM_KEY self.substrate: Substrates = SUBSTRATE - self.workload = KafkaWorkload() - self.state = ClusterState(self, substrate=self.substrate) - - self.health = KafkaHealth(self) - - # HANDLERS - - self.password_action_events = PasswordActionEvents(self) - self.zookeeper = ZooKeeperHandler(self) - self.tls = TLSHandler(self) - self.oauth = OAuthHandler(self) - self.provider = KafkaProvider(self) - self.upgrade = KafkaUpgrade( - self, - dependency_model=KafkaDependencyModel( - **DEPENDENCIES # pyright: ignore[reportGeneralTypeIssues, reportArgumentType] - ), - ) - - # MANAGERS - self.config_manager = ConfigManager( - state=self.state, - workload=self.workload, - config=self.config, - current_version=self.upgrade.current_version, - ) - self.tls_manager = TLSManager( - state=self.state, workload=self.workload, substrate=self.substrate - ) - self.auth_manager = AuthManager( - state=self.state, - workload=self.workload, - kafka_opts=self.config_manager.kafka_opts, - log4j_opts=self.config_manager.tools_log4j_opts, - ) + # Common attrs init + self.state = ClusterState(self, substrate=self.substrate) + self.sysctl_config = sysctl.Config(name=CHARM_KEY) - # LIB HANDLERS + self.workload = KafkaWorkload() # Will be re-instantiated for each role. + self.restart = RollingOpsManager(self, relation="restart", callback=self._restart_broker) - self.sysctl_config = sysctl.Config(name=CHARM_KEY) - self.restart = RollingOpsManager(self, relation="restart", callback=self._restart) self._grafana_agent = COSAgentProvider( self, metrics_endpoints=[ # Endpoint for the kafka and jmx exporters # See https://github.com/canonical/charmed-kafka-snap for details {"path": "/metrics", "port": JMX_EXPORTER_PORT}, + {"path": "/metrics", "port": JMX_CC_PORT}, ], metrics_rules_dir=METRICS_RULES_DIR, logs_rules_dir=LOGS_RULES_DIR, - log_slots=[f"{self.workload.SNAP_NAME}:{self.workload.LOG_SLOT}"], + log_slots=[f"{self.workload.SNAP_NAME}:{slot}" for slot in self.workload.LOG_SLOTS], ) self.framework.observe(getattr(self.on, "install"), self._on_install) - self.framework.observe(getattr(self.on, "start"), self._on_start) - self.framework.observe(getattr(self.on, "config_changed"), self._on_config_changed) - self.framework.observe(getattr(self.on, "update_status"), self._on_update_status) self.framework.observe(getattr(self.on, "remove"), self._on_remove) - self.framework.observe(getattr(self.on, "secret_changed"), self._on_secret_changed) + self.framework.observe(getattr(self.on, "config_changed"), self._on_roles_changed) - self.framework.observe(self.on[PEER].relation_changed, self._on_config_changed) + # peer-cluster events are shared between all roles, so they must be initialised here to avoid multiple instantiations + self.peer_cluster = PeerClusterEventsHandler(self) - self.framework.observe( - getattr(self.on, "data_storage_attached"), self._on_storage_attached - ) - self.framework.observe( - getattr(self.on, "data_storage_detaching"), self._on_storage_detaching - ) + # Register role event handlers after the global ones, so that they take priority. 
+ self.broker = BrokerOperator(self) + self.balancer = BalancerOperator(self) + + self.tls = TLSHandler(self) def _on_install(self, _) -> None: """Handler for `install` event.""" - if self.workload.install(): - self._set_os_config() - self.config_manager.set_environment() - self.unit.set_workload_version(self.workload.get_version()) - - else: + if not self.workload.install(): self._set_status(Status.SNAP_NOT_INSTALLED) - - def _on_start(self, event: StartEvent) -> None: - """Handler for `start` event.""" - self._set_status(self.state.ready_to_start) - if not isinstance(self.unit.status, ActiveStatus): - event.defer() return - # required settings given zookeeper connection config has been created - self.config_manager.set_zk_jaas_config() - self.config_manager.set_server_properties() - self.config_manager.set_client_properties() - - # start kafka service - self.workload.start() - logger.info("Kafka snap started") - - # check for connection - self.on.update_status.emit() - - # only log once on successful 'on-start' run - if isinstance(self.unit.status, ActiveStatus): - logger.info(f'Broker {self.unit.name.split("/")[1]} connected') - - def _on_config_changed(self, event: EventBase) -> None: - """Generic handler for most `config_changed` events across relations.""" - # only overwrite properties if service is already active - if not self.healthy or not self.upgrade.idle: - event.defer() - return - - # Load current properties set in the charm workload - properties = self.workload.read(self.workload.paths.server_properties) - properties_changed = set(properties) ^ set(self.config_manager.server_properties) - - zk_jaas = self.workload.read(self.workload.paths.zk_jaas) - zk_jaas_changed = set(zk_jaas) ^ set(self.config_manager.zk_jaas_config.splitlines()) - - if not properties or not zk_jaas: - # Event fired before charm has properly started - event.defer() - return - - # update environment - self.config_manager.set_environment() - self.unit.set_workload_version(self.workload.get_version()) - - if zk_jaas_changed: - clean_broker_jaas = [conf.strip() for conf in zk_jaas] - clean_config_jaas = [ - conf.strip() for conf in self.config_manager.zk_jaas_config.splitlines() - ] - logger.info( - ( - f'Broker {self.unit.name.split("/")[1]} updating JAAS config - ' - f"OLD JAAS = {set(clean_broker_jaas) - set(clean_config_jaas)}, " - f"NEW JAAS = {set(clean_config_jaas) - set(clean_broker_jaas)}" - ) - ) - self.config_manager.set_zk_jaas_config() - - if properties_changed: - logger.info( - ( - f'Broker {self.unit.name.split("/")[1]} updating config - ' - f"OLD PROPERTIES = {set(properties) - set(self.config_manager.server_properties)}, " - f"NEW PROPERTIES = {set(self.config_manager.server_properties) - set(properties)}" - ) - ) - self.config_manager.set_server_properties() - - if zk_jaas_changed or properties_changed: - if isinstance(event, StorageEvent): # to get new storages - self.on[f"{self.restart.name}"].acquire_lock.emit( - callback_override="_disable_enable_restart" - ) - else: - self.on[f"{self.restart.name}"].acquire_lock.emit() - - # update client_properties whenever possible - self.config_manager.set_client_properties() - - # If Kafka is related to client charms, update their information. 
- if self.model.relations.get(REL_NAME, None) and self.unit.is_leader(): - self.update_client_data() - - def _on_update_status(self, _: UpdateStatusEvent) -> None: - """Handler for `update-status` events.""" - if not self.healthy or not self.upgrade.idle: - return - - if not self.state.zookeeper.broker_active(): - self._set_status(Status.ZK_NOT_CONNECTED) - return - - # NOTE for situations like IP change and late integration with rack-awareness charm. - # If properties have changed, the broker will restart. - self.on.config_changed.emit() + self._set_os_config() + def _set_os_config(self) -> None: + """Sets sysctl config.""" try: - if not self.health.machine_configured(): - self._set_status(Status.SYSCONF_NOT_OPTIMAL) - return - except SnapError as e: - logger.debug(f"Error: {e}") - self._set_status(Status.SNAP_NOT_RUNNING) - return - - self._set_status(Status.ACTIVE) + self.sysctl_config.configure(OS_REQUIREMENTS) + except (sysctl.ApplyError, sysctl.ValidationError, sysctl.CommandError) as e: + logger.error(f"Error setting values on sysctl: {e.message}") + self._set_status(Status.SYSCONF_NOT_POSSIBLE) def _on_remove(self, _) -> None: """Handler for stop.""" self.sysctl_config.remove() - def _on_secret_changed(self, event: SecretChangedEvent) -> None: - """Handler for `secret_changed` events.""" - if not event.secret.label or not self.state.cluster.relation: - return + def _set_status(self, key: Status) -> None: + """Sets charm status.""" + status: StatusBase = key.value.status + log_level: DebugLevel = key.value.log_level + + getattr(logger, log_level.lower())(status.message) + self.unit.status = status + + def _on_roles_changed(self, _): + """Handler for `config_changed` events. + + This handler is in charge of stopping the workloads, since a sub-operator does not + register its event handlers once its role has been removed from the config. + """ + if not self.state.runs_broker and self.broker.workload.active(): + self.broker.workload.stop() - if event.secret.label == self.state.cluster.data_interface._generate_secret_label( - PEER, - self.state.cluster.relation.id, - "extra", # pyright: ignore[reportArgumentType] -- Changes with the https://github.com/canonical/data-platform-libs/issues/124 + if ( + not self.state.runs_balancer + and self.unit.is_leader() + and self.balancer.workload.active() ): - self.on.config_changed.emit() - - def _on_storage_attached(self, event: StorageAttachedEvent) -> None: - """Handler for `storage_attached` events.""" - # new dirs won't be used until topic partitions are assigned to it - # either automatically for new topics, or manually for existing - # set status only for running services, not on startup - self.workload.exec(f"chmod -R 750 {self.workload.paths.data_path}") - self.workload.exec(f"chown -R {USER}:{GROUP} {self.workload.paths.data_path}") - self.workload.exec( - f"""find {self.workload.paths.data_path} -type f -name "meta.properties" -delete || true""" - ) - if self.workload.active(): - self._set_status(Status.ADDED_STORAGE) - self._on_config_changed(event) - - def _on_storage_detaching(self, _: StorageDetachingEvent) -> None: - """Handler for `storage_detaching` events.""" - # in the case where there may be replication recovery may be possible - if self.state.brokers and len(self.state.brokers) > 1: - self._set_status(Status.REMOVED_STORAGE) - else: - self._set_status(Status.REMOVED_STORAGE_NO_REPL) + self.balancer.workload.stop() - self.on.config_changed.emit() + def _restart_broker(self, event: EventBase) -> None: + """Handler for `rolling_ops` restart events. 
- def _restart(self, event: EventBase) -> None: - """Handler for `rolling_ops` restart events.""" + The RollingOpsManager expects a charm instance, so we cannot move this method to the broker logic. + """ # only attempt restart if service is already active - if not self.healthy: + if not self.broker.healthy: event.defer() return - self.workload.restart() + self.broker.workload.restart() # FIXME: This logic should be improved as part of ticket DPE-3155 # For more information, please refer to https://warthogs.atlassian.net/browse/DPE-3155 time.sleep(10.0) - if self.workload.active(): - logger.info(f'Broker {self.unit.name.split("/")[1]} restarted') - else: - logger.error(f"Broker {self.unit.name.split('/')[1]} failed to restart") + def _disable_enable_restart_broker(self, event: RunWithLock) -> None: + """Handler for `rolling_ops` disable_enable restart events. - def _disable_enable_restart(self, event: RunWithLock) -> None: - """Handler for `rolling_ops` disable_enable restart events.""" - if not self.healthy: + The RollingOpsManager expects a charm instance, so we cannot move this method to the broker logic. + """ + if not self.broker.healthy: logger.warning(f"Broker {self.unit.name.split('/')[1]} is not ready to restart") event.defer() return - self.workload.disable_enable() - self.workload.start() + self.broker.workload.disable_enable() + self.broker.workload.start() - if self.workload.active(): + if self.broker.workload.active(): logger.info(f'Broker {self.unit.name.split("/")[1]} restarted') else: logger.error(f"Broker {self.unit.name.split('/')[1]} failed to restart") return - def _set_os_config(self) -> None: - """Sets sysctl config.""" - try: - self.sysctl_config.configure(OS_REQUIREMENTS) - except (sysctl.ApplyError, sysctl.ValidationError, sysctl.CommandError) as e: - logger.error(f"Error setting values on sysctl: {e.message}") - self._set_status(Status.SYSCONF_NOT_POSSIBLE) - - @property - def healthy(self) -> bool: - """Checks and updates various charm lifecycle states. - - Is slow to fail due to retries, to be used sparingly. - - Returns: - True if service is alive and active. Otherwise False - """ - self._set_status(self.state.ready_to_start) - if not isinstance(self.unit.status, ActiveStatus): - return False - - if not self.workload.active(): - self._set_status(Status.SNAP_NOT_RUNNING) - return False - - return True - - def update_client_data(self) -> None: - """Writes necessary relation data to all related client applications.""" - if not self.unit.is_leader() or not self.healthy: - return - - for client in self.state.clients: - if not client.password: - logger.debug( - f"Skipping update of {client.app.name}, user has not yet been added..." 
- ) - continue - - client.update( - { - "endpoints": client.bootstrap_server, - "zookeeper-uris": client.zookeeper_uris, - "consumer-group-prefix": client.consumer_group_prefix, - "topic": client.topic, - "username": client.username, - "password": client.password, - "tls": client.tls, - "tls-ca": client.tls, # TODO: fix tls-ca - } - ) - - def _set_status(self, key: Status) -> None: - """Sets charm status.""" - status: StatusBase = key.value.status - log_level: DebugLevel = key.value.log_level - - getattr(logger, log_level.lower())(status.message) - self.unit.status = status - if __name__ == "__main__": main(KafkaCharm) diff --git a/src/core/cluster.py b/src/core/cluster.py index 54f3c921..e24fb5d8 100644 --- a/src/core/cluster.py +++ b/src/core/cluster.py @@ -6,22 +6,40 @@ import os from functools import cached_property +from typing import TYPE_CHECKING, Any from charms.data_platform_libs.v0.data_interfaces import ( + SECRET_GROUPS, DatabaseRequirerData, DataPeerData, DataPeerOtherUnitData, DataPeerUnitData, KafkaProviderData, + ProviderData, + RequirerData, ) -from ops import Framework, Object, Relation +from ops import Object, Relation from ops.model import Unit -from core.models import KafkaBroker, KafkaClient, KafkaCluster, OAuth, ZooKeeper +from core.models import ( + JSON, + KafkaBroker, + KafkaClient, + KafkaCluster, + OAuth, + PeerCluster, + ZooKeeper, +) from literals import ( + ADMIN_USER, + BALANCER, + BROKER, INTERNAL_USERS, + MIN_REPLICAS, OAUTH_REL_NAME, PEER, + PEER_CLUSTER_ORCHESTRATOR_RELATION, + PEER_CLUSTER_RELATION, REL_NAME, SECRETS_UNIT, SECURITY_PROTOCOL_PORTS, @@ -31,13 +49,49 @@ Substrates, ) +if TYPE_CHECKING: + from charm import KafkaCharm + +custom_secret_groups = SECRET_GROUPS +setattr(custom_secret_groups, "BROKER", "broker") +setattr(custom_secret_groups, "BALANCER", "balancer") +setattr(custom_secret_groups, "ZOOKEEPER", "zookeeper") + +SECRET_LABEL_MAP = { + "broker-username": getattr(custom_secret_groups, "BROKER"), + "broker-password": getattr(custom_secret_groups, "BROKER"), + "broker-uris": getattr(custom_secret_groups, "BROKER"), + "zk-username": getattr(custom_secret_groups, "ZOOKEEPER"), + "zk-password": getattr(custom_secret_groups, "ZOOKEEPER"), + "zk-uris": getattr(custom_secret_groups, "ZOOKEEPER"), + "balancer-username": getattr(custom_secret_groups, "BALANCER"), + "balancer-password": getattr(custom_secret_groups, "BALANCER"), + "balancer-uris": getattr(custom_secret_groups, "BALANCER"), +} + + +class PeerClusterOrchestratorData(ProviderData, RequirerData): + """Provider-side data model for the `peer-cluster-orchestrator` relation.""" + + SECRET_LABEL_MAP = SECRET_LABEL_MAP + SECRET_FIELDS = BALANCER.requested_secrets + + +class PeerClusterData(ProviderData, RequirerData): + """Requirer-side data model for the `peer-cluster` relation.""" + + SECRET_LABEL_MAP = SECRET_LABEL_MAP + SECRET_FIELDS = BROKER.requested_secrets + class ClusterState(Object): """Collection of global cluster state for the Kafka services.""" - def __init__(self, charm: Framework | Object, substrate: Substrates): + def __init__(self, charm: "KafkaCharm", substrate: Substrates): super().__init__(parent=charm, key="charm_state") self.substrate: Substrates = substrate + self.roles = charm.config.roles + self.network_bandwidth = charm.config.network_bandwidth self.peer_app_interface = DataPeerData(self.model, relation_name=PEER) self.peer_unit_interface = DataPeerUnitData( @@ -65,6 +119,76 @@ def client_relations(self) -> set[Relation]: """The relations of all client applications.""" return set(self.model.relations[REL_NAME]) + @property + def 
peer_cluster_orchestrator_relations(self) -> set[Relation]: + """The `peer-cluster-orchestrator` relations that this charm is providing.""" + return set(self.model.relations[PEER_CLUSTER_ORCHESTRATOR_RELATION]) + + @property + def peer_cluster_relation(self) -> Relation | None: + """The `peer-cluster` relation that this charm is requiring.""" + return self.model.get_relation(PEER_CLUSTER_RELATION) + + @property + def peer_clusters(self) -> set[PeerCluster]: + """The state for all related `peer-cluster` applications that this charm is providing for.""" + peer_clusters = set() + balancer_kwargs: dict[str, Any] = { + "balancer_username": self.cluster.balancer_username, + "balancer_password": self.cluster.balancer_password, + "balancer_uris": self.cluster.balancer_uris, + } + for relation in self.peer_cluster_orchestrator_relations: + if not relation.app or not self.runs_balancer: + continue + + peer_clusters.add( + PeerCluster( + relation=relation, + data_interface=PeerClusterOrchestratorData(self.model, relation.name), + **balancer_kwargs, + ) + ) + + return peer_clusters + + # FIXME: will need renaming once we use Kraft as the orchestrator + # uses the 'already there' BALANCER username now + # will need to create one independently with Basic HTTP auth + multiple broker apps + # right now, multiple<->multiple is very brittle + @property + def balancer(self) -> PeerCluster: + """The state for the `peer-cluster-orchestrator` related balancer application.""" + balancer_kwargs: dict[str, Any] = ( + { + "balancer_username": self.cluster.balancer_username, + "balancer_password": self.cluster.balancer_password, + "balancer_uris": self.cluster.balancer_uris, + } + if self.runs_balancer + else {} + ) + + if self.runs_broker: # must be requiring, initialise with necessary broker data + return PeerCluster( + relation=self.peer_cluster_relation, # if same app, this will be None and OK + data_interface=PeerClusterData(self.model, PEER_CLUSTER_RELATION), + broker_username=ADMIN_USER, + broker_password=self.cluster.internal_user_credentials.get(ADMIN_USER, ""), + broker_uris=self.bootstrap_server, + racks=self.racks, + broker_capacities=self.broker_capacities, + zk_username=self.zookeeper.username, + zk_password=self.zookeeper.password, + zk_uris=self.zookeeper.uris, + **balancer_kwargs, # in case of roles=broker,balancer on this app + ) + + else: # must be roles=balancer only then, only load with necessary balancer data + return list(self.peer_clusters)[ + 0 + ] # for broker - balancer relation, currently limited to 1 + @property def oauth_relation(self) -> Relation | None: """The OAuth relation.""" @@ -100,7 +224,6 @@ def cluster(self) -> KafkaCluster: relation=self.peer_relation, data_interface=self.peer_app_interface, component=self.model.app, - substrate=self.substrate, ) @property @@ -130,7 +253,6 @@ def zookeeper(self) -> ZooKeeper: return ZooKeeper( relation=self.zookeeper_relation, data_interface=self.zookeeper_requires_interface, - substrate=self.substrate, local_app=self.cluster.app, ) @@ -154,7 +276,6 @@ def clients(self) -> set[KafkaClient]: relation=relation, data_interface=self.client_provider_interface, component=relation.app, - substrate=self.substrate, local_app=self.cluster.app, bootstrap_server=self.bootstrap_server, password=self.cluster.client_passwords.get(f"relation-{relation.id}", ""), @@ -228,7 +349,35 @@ def planned_units(self) -> int: return self.model.app.planned_units() @property - def ready_to_start(self) -> Status: + def racks(self) -> int: + """Number of racks for the 
brokers.""" + return len({broker.rack for broker in self.brokers if broker.rack}) + + @property + def broker_capacities(self) -> dict[str, list[JSON]]: + """The capacities for all Kafka broker.""" + broker_capacities = [] + for broker in self.brokers: + if not all([broker.cores, broker.storages]): + return {} + + broker_capacities.append( + { + "brokerId": str(broker.unit_id), + "capacity": { + "DISK": broker.storages, + "CPU": {"num.cores": broker.cores}, + "NW_IN": str(self.network_bandwidth), + "NW_OUT": str(self.network_bandwidth), + }, + "doc": str(broker.host), + } + ) + + return {"brokerCapacities": broker_capacities} + + @property + def ready_to_start(self) -> Status: # noqa: C901 """Check for active ZooKeeper relation and adding of inter-broker auth username. Returns: @@ -237,6 +386,35 @@ def ready_to_start(self) -> Status: if not self.peer_relation: return Status.NO_PEER_RELATION + for status in [self._broker_status, self._balancer_status]: + if status != Status.ACTIVE: + return status + + return Status.ACTIVE + + @property + def _balancer_status(self) -> Status: + """Checks for role=balancer specific readiness.""" + if not self.runs_balancer or not self.unit_broker.unit.is_leader(): + return Status.ACTIVE + + if not self.peer_cluster_orchestrator_relations and not self.runs_broker: + return Status.NO_PEER_CLUSTER_RELATION + + if not self.balancer.broker_connected: + return Status.NO_BROKER_DATA + + if len(self.balancer.broker_capacities.get("brokerCapacities", [])) < MIN_REPLICAS: + return Status.NOT_ENOUGH_BROKERS + + return Status.ACTIVE + + @property + def _broker_status(self) -> Status: + """Checks for role=broker specific readiness.""" + if not self.runs_broker: + return Status.ACTIVE + if not self.zookeeper: return Status.ZK_NOT_RELATED @@ -254,3 +432,13 @@ def ready_to_start(self) -> Status: return Status.NO_BROKER_CREDS return Status.ACTIVE + + @property + def runs_balancer(self) -> bool: + """Is the charm enabling the balancer?""" + return BALANCER.value in self.roles + + @property + def runs_broker(self) -> bool: + """Is the charm enabling the broker(s)?""" + return BROKER.value in self.roles diff --git a/src/core/models.py b/src/core/models.py index 51016966..acd995e5 100644 --- a/src/core/models.py +++ b/src/core/models.py @@ -4,21 +4,28 @@ """Collection of state objects for the Kafka relations, apps and units.""" +import json import logging -from typing import MutableMapping +from typing import MutableMapping, TypeAlias import requests -from charms.data_platform_libs.v0.data_interfaces import Data, DataPeerData, DataPeerUnitData +from charms.data_platform_libs.v0.data_interfaces import ( + Data, + DataPeerData, + DataPeerUnitData, +) from charms.zookeeper.v0.client import QuorumLeaderNotFoundError, ZooKeeperManager from kazoo.client import AuthFailedError, NoNodeError from ops.model import Application, Relation, Unit from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed from typing_extensions import override -from literals import INTERNAL_USERS, SECRETS_APP, Substrates +from literals import BALANCER, BROKER, INTERNAL_USERS, SECRETS_APP, Substrates logger = logging.getLogger(__name__) +JSON: TypeAlias = dict[str, "JSON"] | list["JSON"] | str | int | float | bool | None + class RelationState: """Relation state object.""" @@ -28,7 +35,7 @@ def __init__( relation: Relation | None, data_interface: Data, component: Unit | Application | None, - substrate: Substrates, + substrate: Substrates | None = None, ): self.relation = relation 
self.data_interface = data_interface @@ -58,6 +65,243 @@ def update(self, items: dict[str, str]) -> None: del self.relation_data[field] +class PeerCluster(RelationState): + """State collection metadata for a peer-cluster application.""" + + def __init__( + self, + relation: Relation | None, + data_interface: Data, + broker_username: str = "", + broker_password: str = "", + broker_uris: str = "", + racks: int = 0, + broker_capacities: dict[str, list[JSON]] = {}, + zk_username: str = "", + zk_password: str = "", + zk_uris: str = "", + balancer_username: str = "", + balancer_password: str = "", + balancer_uris: str = "", + ): + super().__init__(relation, data_interface, None, None) + self._broker_username = broker_username + self._broker_password = broker_password + self._broker_uris = broker_uris + self._racks = racks + self._broker_capacities = broker_capacities + self._zk_username = zk_username + self._zk_password = zk_password + self._zk_uris = zk_uris + self._balancer_username = balancer_username + self._balancer_password = balancer_password + self._balancer_uris = balancer_uris + + @property + def roles(self) -> str: + """All the roles passed from the related application.""" + if not self.relation: + return "" + + return ( + self.data_interface.fetch_relation_field(relation_id=self.relation.id, field="roles") + or "" + ) + + @property + def broker_username(self) -> str: + """The provided username for the broker application.""" + if self._broker_username: + return self._broker_username + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("broker-username", "") + + @property + def broker_password(self) -> str: + """The provided password for the broker application.""" + if self._broker_password: + return self._broker_password + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("broker-password", "") + + @property + def broker_uris(self) -> str: + """The provided uris for the balancer application to connect to the broker application.""" + if self._broker_uris: + return self._broker_uris + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("broker-uris", "") + + @property + def racks(self) -> int: + """The number of racks for the brokers.""" + if self._racks: + return self._racks + + if not self.relation: + return 0 + + return int( + self.data_interface.fetch_relation_field(relation_id=self.relation.id, field="racks") + or 0 + ) + + @property + def broker_capacities(self) -> dict[str, list[JSON]]: + """The capacities for all Kafka brokers.""" + if self._broker_capacities: + return self._broker_capacities + + if not self.relation: + return {} + + return json.loads( + self.data_interface.fetch_relation_field( + relation_id=self.relation.id, field="broker-capacities" + ) + or "{}" + ) + + @property + def zk_username(self) -> str: + """Username to connect to ZooKeeper.""" + if self._zk_username: + return self._zk_username + + if 
not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("zk-username", "") + + @property + def zk_password(self) -> str: + """Password to connect to ZooKeeper.""" + if self._zk_password: + return self._zk_password + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("zk-password", "") + + @property + def zk_uris(self) -> str: + """The ZooKeeper server endpoints for the balancer application to connect with.""" + if self._zk_uris: + return self._zk_uris + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BALANCER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("zk-uris", "") + + @property + def balancer_username(self) -> str: + """The provided username for the balancer application.""" + if self._balancer_username: + return self._balancer_username + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BROKER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("balancer-username", "") + + @property + def balancer_password(self) -> str: + """The provided password for the balancer application.""" + if self._balancer_password: + return self._balancer_password + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BROKER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("balancer-password", "") + + @property + def balancer_uris(self) -> str: + """The provided uris for the broker application to connect to the balancer application.""" + if self._balancer_uris: + return self._balancer_uris + + if not self.relation or not self.relation.app: + return "" + + return self.data_interface._fetch_relation_data_with_secrets( + component=self.relation.app, + req_secret_fields=BROKER.requested_secrets, + relation=self.relation, + fields=BALANCER.requested_secrets, + ).get("balancer-uris", "") + + @property + def broker_connected(self) -> bool: + """Checks if there is an active broker relation with all necessary data.""" + if not all( + [ + self.broker_username, + self.broker_password, + self.broker_uris, + self.zk_username, + self.zk_password, + self.zk_uris, + self.broker_capacities, + # rack is optional, empty if not rack-aware + ] + ): + return False + + return True + + class KafkaCluster(RelationState): """State collection metadata for the peer relation.""" @@ -66,9 +310,8 @@ def __init__( relation: Relation | None, data_interface: DataPeerData, component: Application, - substrate: Substrates, ): - super().__init__(relation, data_interface, component, substrate) + super().__init__(relation, data_interface, component, None) self.data_interface = data_interface self.app = component @@ -131,6 +374,21 @@ def mtls_enabled(self) -> bool: """ return self.relation_data.get("mtls", "disabled") 
== "enabled" + @property + def balancer_username(self) -> bool: + """Persisted balancer username.""" + return self.relation_data.get("balancer-username", "") + + @property + def balancer_password(self) -> bool: + """Persisted balancer password.""" + return self.relation_data.get("balancer-password", "") + + @property + def balancer_uris(self) -> bool: + """Persisted balancer uris.""" + return self.relation_data.get("balancer-uris", "") + class KafkaBroker(RelationState): """State collection metadata for a unit.""" @@ -230,6 +488,21 @@ def truststore_password(self) -> str: """ return self.relation_data.get("truststore-password", "") + @property + def storages(self) -> JSON: + """The current Juju storages for the unit.""" + return json.loads(self.relation_data.get("storages", "{}")) + + @property + def cores(self) -> str: + """The number of CPU cores for the unit machine.""" + return self.relation_data.get("cores", "") + + @property + def rack(self) -> str: + """The rack for the broker on broker.rack from rack.properties.""" + return self.relation_data.get("rack", "") + class ZooKeeper(RelationState): """State collection metadata for a the Zookeeper relation.""" @@ -238,10 +511,9 @@ def __init__( self, relation: Relation | None, data_interface: Data, - substrate: Substrates, local_app: Application | None = None, ): - super().__init__(relation, data_interface, None, substrate) + super().__init__(relation, data_interface, None, None) self._local_app = local_app @property @@ -313,9 +585,15 @@ def uris(self) -> str: if not self.relation: return "" - return ( - self.data_interface.fetch_relation_field(relation_id=self.relation.id, field="uris") - or "" + return ",".join( + sorted( # sorting as they may be disordered + ( + self.data_interface.fetch_relation_field( + relation_id=self.relation.id, field="uris" + ) + or "" + ).split(",") + ) ) @property @@ -388,14 +666,13 @@ def __init__( relation: Relation | None, data_interface: Data, component: Application, - substrate: Substrates, local_app: Application | None = None, bootstrap_server: str = "", password: str = "", # nosec: B107 tls: str = "", zookeeper_uris: str = "", ): - super().__init__(relation, data_interface, component, substrate) + super().__init__(relation, data_interface, component, None) self.app = component self._local_app = local_app self._bootstrap_server = bootstrap_server diff --git a/src/core/structured_config.py b/src/core/structured_config.py index 8a5efcab..bf1489e0 100644 --- a/src/core/structured_config.py +++ b/src/core/structured_config.py @@ -8,7 +8,9 @@ from enum import Enum from charms.data_platform_libs.v0.data_models import BaseConfigModel -from pydantic import validator +from pydantic import Field, validator + +from literals import BALANCER, BROKER logger = logging.getLogger(__name__) @@ -50,6 +52,7 @@ class LogLevel(str, Enum): class CharmConfig(BaseConfigModel): """Manager for the structured configuration.""" + roles: str compression_type: str log_flush_interval_messages: int # int # long log_flush_interval_ms: int | None # long @@ -72,6 +75,9 @@ class CharmConfig(BaseConfigModel): profile: str certificate_extra_sans: str | None log_level: str + network_bandwidth: int = Field(default=50000, validate_default=False, gt=0) + cruisecontrol_balance_threshold: float = Field(default=1.1, validate_default=False, ge=1) + cruisecontrol_capacity_threshold: float = Field(default=0.8, validate_default=False, le=1) @validator("*", pre=True) @classmethod @@ -235,3 +241,14 @@ def log_level_values(cls, value: str) -> str | None: 
f"Value out of the accepted values. Could not properly parsed the roles configuration: {e}" ) return value + + @validator("roles", pre=True) + @classmethod + def roles_values(cls, value: str) -> str: + """Check roles values.""" + roles = set(map(str.strip, value.split(","))) + + if unknown_roles := roles - {BROKER.value, BALANCER.value}: + raise ValueError("Unknown role(s):", unknown_roles) + + return ",".join(sorted(roles)) # this has to be a string as it goes in to properties diff --git a/src/core/workload.py b/src/core/workload.py index ec31f705..3b932ac2 100644 --- a/src/core/workload.py +++ b/src/core/workload.py @@ -8,17 +8,18 @@ import string from abc import ABC, abstractmethod -from literals import PATHS +from literals import BALANCER, BROKER, Role -class KafkaPaths: +class CharmedKafkaPaths: """Object to store common paths for Kafka.""" - def __init__(self): - self.conf_path = PATHS["CONF"] - self.data_path = PATHS["DATA"] - self.binaries_path = PATHS["BIN"] - self.logs_path = PATHS["LOGS"] + def __init__(self, role: Role): + + self.conf_path = role.paths["CONF"] + self.data_path = role.paths["DATA"] + self.binaries_path = role.paths["BIN"] + self.logs_path = role.paths["LOGS"] @property def server_properties(self): @@ -44,6 +45,11 @@ def zk_jaas(self): """ return f"{self.conf_path}/zookeeper-jaas.cfg" + @property + def balancer_jaas(self): + """The cruise_control_jaas.conf filepath.""" + return f"{self.conf_path}/cruise_control_jaas.conf" + @property def keystore(self): """The Java Keystore containing service private-key and signed certificates.""" @@ -80,14 +86,34 @@ def jmx_prometheus_javaagent(self): @property def jmx_prometheus_config(self): - """The configuration for the JMX exporter.""" - return f"{self.conf_path}/jmx_prometheus.yaml" + """The configuration for the Kafka JMX exporter.""" + return f"{BROKER.paths['CONF']}/jmx_prometheus.yaml" + + @property + def jmx_cc_config(self): + """The configuration for the CruiseControl JMX exporter.""" + return f"{BALANCER.paths['CONF']}/jmx_cruise_control.yaml" + + @property + def cruise_control_properties(self): + """The cruisecontrol.properties filepath.""" + return f"{self.conf_path}/cruisecontrol.properties" + + @property + def capacity_jbod_json(self): + """The JBOD capacity JSON.""" + return f"{self.conf_path}/capacityJBOD.json" + + @property + def cruise_control_auth(self): + """The credentials file.""" + return f"{self.conf_path}/cruisecontrol.credentials" class WorkloadBase(ABC): """Base interface for common workload operations.""" - paths = KafkaPaths() + paths: CharmedKafkaPaths @abstractmethod def start(self) -> None: @@ -129,7 +155,10 @@ def write(self, content: str, path: str, mode: str = "w") -> None: @abstractmethod def exec( - self, command: str, env: dict[str, str] | None = None, working_dir: str | None = None + self, + command: list[str] | str, + env: dict[str, str] | None = None, + working_dir: str | None = None, ) -> str: """Runs a command on the workload substrate.""" ... 
diff --git a/src/events/balancer.py b/src/events/balancer.py new file mode 100644 index 00000000..7fac0ff2 --- /dev/null +++ b/src/events/balancer.py @@ -0,0 +1,221 @@ +"""Balancer role core charm logic.""" + +import logging +from subprocess import CalledProcessError +from typing import TYPE_CHECKING + +from ops import ( + ActiveStatus, + EventBase, + Object, +) +from ops.charm import ActionEvent + +from literals import ( + BALANCER, + BALANCER_WEBSERVER_PORT, + BALANCER_WEBSERVER_USER, + MODE_ADD, + MODE_REMOVE, + Status, +) +from managers.balancer import BalancerManager +from managers.config import BalancerConfigManager +from managers.tls import TLSManager +from workload import BalancerWorkload + +if TYPE_CHECKING: + from charm import KafkaCharm + +logger = logging.getLogger(__name__) + + +class BalancerOperator(Object): + """Implements the logic for the balancer.""" + + def __init__(self, charm) -> None: + super().__init__(charm, BALANCER.value) + self.charm: "KafkaCharm" = charm + + self.workload = BalancerWorkload() + + self.tls_manager = TLSManager( + state=self.charm.state, workload=self.workload, substrate=self.charm.substrate + ) + # Fast exit after workload instantiation, but before any event observer + if BALANCER.value not in self.charm.config.roles or not self.charm.unit.is_leader(): + return + + self.config_manager = BalancerConfigManager( + self.charm.state, self.workload, self.charm.config + ) + self.balancer_manager = BalancerManager(self) + + self.framework.observe(self.charm.on.install, self._on_install) + self.framework.observe(self.charm.on.start, self._on_start) + self.framework.observe(self.charm.on.leader_elected, self._on_start) + + # ensures data updates, eventually + self.framework.observe(self.charm.on.update_status, self._on_config_changed) + self.framework.observe(self.charm.on.config_changed, self._on_config_changed) + + self.framework.observe(getattr(self.charm.on, "rebalance_action"), self.rebalance) + + def _on_install(self, _) -> None: + """Handler for `install` event.""" + self.config_manager.set_environment() + + def _on_start(self, event: EventBase) -> None: + """Handler for `start` event.""" + self.charm._set_status(self.charm.state.ready_to_start) + if not isinstance(self.charm.unit.status, ActiveStatus): + event.defer() + return + + if not self.charm.state.cluster.balancer_password: + external_cluster = next(iter(self.charm.state.peer_clusters), None) + payload = { + "balancer-username": BALANCER_WEBSERVER_USER, + "balancer-password": self.charm.workload.generate_password(), + "balancer-uris": f"{self.charm.state.unit_broker.host}:{BALANCER_WEBSERVER_PORT}", + } + # Update relation data intra & extra cluster (if it exists) + self.charm.state.cluster.update(payload) + if external_cluster: + external_cluster.update(payload) + + self.config_manager.set_cruise_control_properties() + self.config_manager.set_broker_capacities() + self.config_manager.set_zk_jaas_config() + self.config_manager.set_cruise_control_auth() + + try: + self.balancer_manager.create_internal_topics() + except CalledProcessError as e: + logger.warning(e.stdout) + event.defer() + return + + self.workload.restart() + + logger.info("Cruise-control started") + + def _on_config_changed(self, event: EventBase) -> None: + """Generic handler for 'something changed' events.""" + if not self.charm.unit.is_leader(): + return + + if not self.healthy: + return + + # NOTE: smells like a good abstraction somewhere + changed_map = [ + ( + "properties", + 
self.workload.paths.cruise_control_properties,
+                self.config_manager.cruise_control_properties,
+            ),
+            (
+                "jaas",
+                self.workload.paths.balancer_jaas,
+                self.config_manager.jaas_config.splitlines(),
+            ),
+        ]
+
+        content_changed = False
+        for kind, path, state_content in changed_map:
+            file_content = self.workload.read(path)
+            if set(file_content) ^ set(state_content):
+                logger.info(
+                    (
+                        f'Balancer {self.charm.unit.name.split("/")[1]} updating config - '
+                        f"OLD {kind.upper()} = {set(map(str.strip, file_content)) - set(map(str.strip, state_content))}, "
+                        f"NEW {kind.upper()} = {set(map(str.strip, state_content)) - set(map(str.strip, file_content))}"
+                    )
+                )
+                content_changed = True
+
+        if content_changed:
+            # safe to update everything even if it hasn't changed, service will restart anyway
+            self.config_manager.set_cruise_control_properties()
+            self.config_manager.set_broker_capacities()
+            self.config_manager.set_zk_jaas_config()
+
+            self._on_start(event)
+
+    def rebalance(self, event: ActionEvent) -> None:
+        """Handles the `rebalance` Juju Action."""
+        failure_conditions = [
+            (not self.charm.unit.is_leader(), "Action must be run on the application leader"),
+            (
+                not self.balancer_manager.cruise_control.monitoring,
+                "CruiseControl balancer service is not yet ready",
+            ),
+            (
+                self.balancer_manager.cruise_control.executing,
+                "CruiseControl balancer service is currently executing a task, please try again later",
+            ),
+            (
+                not self.balancer_manager.cruise_control.ready,
+                "CruiseControl balancer service has not yet collected enough data to provide a partition reallocation proposal",
+            ),
+            (
+                event.params.get("brokerid", None) is None
+                and event.params["mode"] in (MODE_ADD, MODE_REMOVE),
+                "'add' and 'remove' rebalance actions require passing the 'brokerid' parameter",
+            ),
+            (
+                event.params["mode"] in (MODE_ADD, MODE_REMOVE)
+                and event.params.get("brokerid")
+                not in [broker.unit_id for broker in self.charm.state.brokers],
+                "invalid brokerid",
+            ),
+        ]
+
+        for check, msg in failure_conditions:
+            if check:
+                event.fail(msg)
+                return
+
+        response, user_task_id = self.balancer_manager.rebalance(**event.params)
+        logger.debug(f"rebalance - {vars(response)=}")
+
+        if response.status_code != 200:
+            event.fail(
+                f"'{event.params['mode']}' rebalance failed with status code {response.status_code}"
+            )
+            return
+
+        self.charm._set_status(Status.WAITING_FOR_REBALANCE)
+
+        self.balancer_manager.wait_for_task(user_task_id)
+
+        sanitised_response = self.balancer_manager.clean_results(response.json())
+        if not isinstance(sanitised_response, dict):
+            event.fail("Unknown error")
+            return
+
+        event.set_results(sanitised_response)
+
+        self.charm._set_status(Status.ACTIVE)
+
+    @property
+    def healthy(self) -> bool:
+        """Checks and updates various charm lifecycle states.
+
+        Returns:
+            True if service is alive and active. Otherwise False
+        """
+        # needed in case it's called by BrokerOperator in update_client_data
+        if not self.charm.state.runs_balancer:
+            return True
+
+        self.charm._set_status(self.charm.state.ready_to_start)
+        if not isinstance(self.charm.unit.status, ActiveStatus):
+            return False
+
+        if not self.workload.active() and self.charm.unit.is_leader():
+            self.charm._set_status(Status.CC_NOT_RUNNING)
+            return False
+
+        return True
diff --git a/src/events/broker.py b/src/events/broker.py
new file mode 100644
index 00000000..122bfe17
--- /dev/null
+++ b/src/events/broker.py
@@ -0,0 +1,343 @@
+"""Broker role core charm logic."""
+
+import json
+import logging
+from typing import TYPE_CHECKING
+
+from charms.operator_libs_linux.v1.snap import SnapError
+from ops import (
+    ActiveStatus,
+    EventBase,
+    InstallEvent,
+    Object,
+    SecretChangedEvent,
+    StartEvent,
+    StorageAttachedEvent,
+    StorageDetachingEvent,
+    StorageEvent,
+    UpdateStatusEvent,
+)
+
+from events.oauth import OAuthHandler
+from events.password_actions import PasswordActionEvents
+from events.provider import KafkaProvider
+from events.upgrade import KafkaDependencyModel, KafkaUpgrade
+from events.zookeeper import ZooKeeperHandler
+from health import KafkaHealth
+from literals import (
+    BROKER,
+    DEPENDENCIES,
+    GROUP,
+    PEER,
+    REL_NAME,
+    USER,
+    Status,
+)
+from managers.auth import AuthManager
+from managers.balancer import BalancerManager
+from managers.config import ConfigManager
+from managers.tls import TLSManager
+from workload import KafkaWorkload
+
+if TYPE_CHECKING:
+    from charm import KafkaCharm
+
+logger = logging.getLogger(__name__)
+
+
+class BrokerOperator(Object):
+    """Charmed Operator for Kafka."""
+
+    def __init__(self, charm) -> None:
+        super().__init__(charm, BROKER.value)
+        self.charm: "KafkaCharm" = charm
+
+        self.workload = KafkaWorkload()
+
+        self.tls_manager = TLSManager(
+            state=self.charm.state, workload=self.workload, substrate=self.charm.substrate
+        )
+        # Fast exit after workload instantiation, but before any event observer
+        if BROKER.value not in self.charm.config.roles:
+            return
+
+        self.health = KafkaHealth(self)
+        self.upgrade = KafkaUpgrade(
+            self,
+            dependency_model=KafkaDependencyModel(
+                **DEPENDENCIES  # pyright: ignore[reportGeneralTypeIssues, reportArgumentType]
+            ),
+        )
+        self.password_action_events = PasswordActionEvents(self)
+        self.zookeeper = ZooKeeperHandler(self)
+        self.oauth = OAuthHandler(self)
+        self.provider = KafkaProvider(self)
+
+        # MANAGERS
+
+        self.config_manager = ConfigManager(
+            state=self.charm.state,
+            workload=self.workload,
+            config=self.charm.config,
+            current_version=self.upgrade.current_version,
+        )
+        self.auth_manager = AuthManager(
+            state=self.charm.state,
+            workload=self.workload,
+            kafka_opts=self.config_manager.kafka_opts,
+            log4j_opts=self.config_manager.tools_log4j_opts,
+        )
+
+        self.balancer_manager = BalancerManager(self)
+
+        self.framework.observe(getattr(self.charm.on, "install"), self._on_install)
+        self.framework.observe(getattr(self.charm.on, "start"), self._on_start)
+        self.framework.observe(getattr(self.charm.on, "config_changed"), self._on_config_changed)
+        self.framework.observe(getattr(self.charm.on, "update_status"), self._on_update_status)
+        self.framework.observe(getattr(self.charm.on, "secret_changed"), self._on_secret_changed)
+
+        self.framework.observe(self.charm.on[PEER].relation_changed, self._on_config_changed)
+
+        self.framework.observe(
+            getattr(self.charm.on, "data_storage_attached"), self._on_storage_attached
+        )
+
self.framework.observe( + getattr(self.charm.on, "data_storage_detaching"), self._on_storage_detaching + ) + + def _on_install(self, _: InstallEvent) -> None: + """Handler for `install` event.""" + self.config_manager.set_environment() + self.charm.unit.set_workload_version(self.workload.get_version()) + + def _on_start(self, event: StartEvent) -> None: + """Handler for `start` event.""" + if self.charm.state.peer_relation: + self.charm.state.unit_broker.update( + {"cores": str(self.balancer_manager.cores), "rack": self.config_manager.rack} + ) + + self.charm._set_status(self.charm.state.ready_to_start) + if not isinstance(self.charm.unit.status, ActiveStatus): + event.defer() + return + + # required settings given zookeeper connection config has been created + self.config_manager.set_zk_jaas_config() + self.config_manager.set_server_properties() + self.config_manager.set_client_properties() + + # start kafka service + self.workload.start() + logger.info("Kafka snap started") + + # check for connection + self.charm.on.update_status.emit() + + # only log once on successful 'on-start' run + if isinstance(self.charm.unit.status, ActiveStatus): + logger.info(f'Broker {self.charm.unit.name.split("/")[1]} connected') + + def _on_config_changed(self, event: EventBase) -> None: + """Generic handler for most `config_changed` events across relations.""" + # only overwrite properties if service is already active + if not self.healthy or not self.upgrade.idle: + event.defer() + return + + # Load current properties set in the charm workload + properties = self.workload.read(self.workload.paths.server_properties) + properties_changed = set(properties) ^ set(self.config_manager.server_properties) + + zk_jaas = self.workload.read(self.workload.paths.zk_jaas) + zk_jaas_changed = set(zk_jaas) ^ set(self.config_manager.jaas_config.splitlines()) + + if not properties or not zk_jaas: + # Event fired before charm has properly started + event.defer() + return + + # update environment + self.config_manager.set_environment() + self.charm.unit.set_workload_version(self.workload.get_version()) + + if zk_jaas_changed: + clean_broker_jaas = [conf.strip() for conf in zk_jaas] + clean_config_jaas = [ + conf.strip() for conf in self.config_manager.jaas_config.splitlines() + ] + logger.info( + ( + f'Broker {self.charm.unit.name.split("/")[1]} updating JAAS config - ' + f"OLD JAAS = {set(clean_broker_jaas) - set(clean_config_jaas)}, " + f"NEW JAAS = {set(clean_config_jaas) - set(clean_broker_jaas)}" + ) + ) + self.config_manager.set_zk_jaas_config() + + if properties_changed: + logger.info( + ( + f'Broker {self.charm.unit.name.split("/")[1]} updating config - ' + f"OLD PROPERTIES = {set(properties) - set(self.config_manager.server_properties)}, " + f"NEW PROPERTIES = {set(self.config_manager.server_properties) - set(properties)}" + ) + ) + self.config_manager.set_server_properties() + + if zk_jaas_changed or properties_changed: + if isinstance(event, StorageEvent): # to get new storages + self.charm.on[f"{self.charm.restart.name}"].acquire_lock.emit( + callback_override="_disable_enable_restart_broker" + ) + else: + self.charm.on[f"{self.charm.restart.name}"].acquire_lock.emit() + + # update client_properties whenever possible + self.config_manager.set_client_properties() + + # If Kafka is related to client charms, update their information. 
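+        # the leader also mirrors broker capacities, racks and credentials over the
+        # peer-cluster relation, so a related balancer can build its cluster model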
+        if self.model.relations.get(REL_NAME, None) and self.charm.unit.is_leader():
+            self.update_client_data()
+
+        if self.charm.state.peer_cluster_relation and self.charm.unit.is_leader():
+            self.update_peer_cluster_data()
+
+    def _on_update_status(self, _: UpdateStatusEvent) -> None:
+        """Handler for `update-status` events."""
+        if not self.healthy or not self.upgrade.idle:
+            return
+
+        if not self.charm.state.zookeeper.broker_active():
+            self.charm._set_status(Status.ZK_NOT_CONNECTED)
+            return
+
+        # NOTE for situations like IP change and late integration with rack-awareness charm.
+        # If properties have changed, the broker will restart.
+        self.charm.on.config_changed.emit()
+
+        try:
+            if not self.health.machine_configured():
+                self.charm._set_status(Status.SYSCONF_NOT_OPTIMAL)
+                return
+        except SnapError as e:
+            logger.debug(f"Error: {e}")
+            self.charm._set_status(Status.BROKER_NOT_RUNNING)
+            return
+
+        self.charm._set_status(Status.ACTIVE)
+
+    def _on_secret_changed(self, event: SecretChangedEvent) -> None:
+        """Handler for `secret_changed` events."""
+        if not event.secret.label or not self.charm.state.cluster.relation:
+            return
+
+        if event.secret.label == self.charm.state.cluster.data_interface._generate_secret_label(
+            PEER,
+            self.charm.state.cluster.relation.id,
+            "extra",  # pyright: ignore[reportArgumentType] -- Changes with the https://github.com/canonical/data-platform-libs/issues/124
+        ):
+            self.charm.on.config_changed.emit()
+
+    def _on_storage_attached(self, event: StorageAttachedEvent) -> None:
+        """Handler for `storage_attached` events."""
+        # storage-attached usually fires before relation-created/joined
+        if not self.charm.state.peer_relation:
+            event.defer()
+            return
+
+        self.charm.state.unit_broker.update({"storages": self.balancer_manager.storages})
+
+        # new dirs won't be used until topic partitions are assigned to them,
+        # either automatically for new topics, or manually for existing ones
+        # set status only for running services, not on startup
+        self.workload.exec(["chmod", "-R", "750", f"{self.workload.paths.data_path}"])
+        self.workload.exec(["chown", "-R", f"{USER}:{GROUP}", f"{self.workload.paths.data_path}"])
+        self.workload.exec(
+            [
+                "bash",
+                "-c",
+                f"""find {self.workload.paths.data_path} -type f -name meta.properties -delete || true""",
+            ]
+        )
+
+        if self.workload.active():
+            self.charm._set_status(Status.ADDED_STORAGE)
+            # We need the event handler to know about the original event
+            self._on_config_changed(event)
+
+    def _on_storage_detaching(self, _: StorageDetachingEvent) -> None:
+        """Handler for `storage_detaching` events."""
+        # in the case where there is replication, recovery may be possible
+        if self.charm.state.brokers and len(self.charm.state.brokers) > 1:
+            self.charm._set_status(Status.REMOVED_STORAGE)
+        else:
+            self.charm._set_status(Status.REMOVED_STORAGE_NO_REPL)
+
+        self.charm.state.unit_broker.update({"storages": self.balancer_manager.storages})
+        self.charm.on.config_changed.emit()
+
+    @property
+    def healthy(self) -> bool:
+        """Checks and updates various charm lifecycle states.
+
+        Is slow to fail due to retries, to be used sparingly.
+
+        Returns:
+            True if service is alive and active.
Otherwise False + """ + self.charm._set_status(self.charm.state.ready_to_start) + if not isinstance(self.charm.unit.status, ActiveStatus): + return False + + if not self.workload.active(): + self.charm._set_status(Status.BROKER_NOT_RUNNING) + return False + + return True + + def update_client_data(self) -> None: + """Writes necessary relation data to all related client applications.""" + if not self.charm.unit.is_leader() or not self.healthy or not self.charm.balancer.healthy: + return + + for client in self.charm.state.clients: + if not client.password: + logger.debug( + f"Skipping update of {client.app.name}, user has not yet been added..." + ) + continue + + client.update( + { + "endpoints": client.bootstrap_server, + "zookeeper-uris": client.zookeeper_uris, + "consumer-group-prefix": client.consumer_group_prefix, + "topic": client.topic, + "username": client.username, + "password": client.password, + "tls": client.tls, + "tls-ca": client.tls, # TODO: fix tls-ca + } + ) + + def update_peer_cluster_data(self) -> None: + """Writes updated relation data to other peer_cluster apps.""" + if not self.charm.unit.is_leader() or not self.healthy: + return + + self.charm.state.balancer.update( + { + "roles": self.charm.state.roles, + "broker-username": self.charm.state.balancer.broker_username, + "broker-password": self.charm.state.balancer.broker_password, + "broker-uris": self.charm.state.balancer.broker_uris, + "racks": str(self.charm.state.balancer.racks), + "broker-capacities": json.dumps(self.charm.state.balancer.broker_capacities), + "zk-uris": self.charm.state.balancer.zk_uris, + "zk-username": self.charm.state.balancer.zk_username, + "zk-password": self.charm.state.balancer.zk_password, + } + ) + + # self.charm.on.config_changed.emit() # ensure both broker+balancer get a changed event diff --git a/src/events/oauth.py b/src/events/oauth.py index 575e037a..24a680f9 100644 --- a/src/events/oauth.py +++ b/src/events/oauth.py @@ -13,6 +13,7 @@ if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -20,12 +21,13 @@ class OAuthHandler(Object): """Handler for managing oauth relations.""" - def __init__(self, charm): - super().__init__(charm, "oauth") - self.charm: "KafkaCharm" = charm + def __init__(self, dependent: "BrokerOperator") -> None: + super().__init__(dependent, "oauth") + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm client_config = ClientConfig("https://kafka.local", "openid email", ["client_credentials"]) - self.oauth = OAuthRequirer(charm, client_config, relation_name=OAUTH_REL_NAME) + self.oauth = OAuthRequirer(self.charm, client_config, relation_name=OAUTH_REL_NAME) self.framework.observe( self.charm.on[OAUTH_REL_NAME].relation_changed, self._on_oauth_relation_changed ) @@ -37,4 +39,4 @@ def _on_oauth_relation_changed(self, event: EventBase) -> None: """Handler for `_on_oauth_relation_changed` event.""" if not self.charm.unit.is_leader() or not self.charm.state.brokers: return - self.charm._on_config_changed(event) + self.dependent._on_config_changed(event) diff --git a/src/events/password_actions.py b/src/events/password_actions.py index d7d6b348..ef1ed1c4 100644 --- a/src/events/password_actions.py +++ b/src/events/password_actions.py @@ -13,6 +13,7 @@ if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -20,9 +21,10 @@ class PasswordActionEvents(Object): """Event handlers for password-related Juju 
Actions.""" - def __init__(self, charm): - super().__init__(charm, "password_events") - self.charm: "KafkaCharm" = charm + def __init__(self, dependent: "BrokerOperator") -> None: + super().__init__(dependent, "password_events") + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm self.framework.observe( getattr(self.charm.on, "set_password_action"), self._set_password_action @@ -43,13 +45,13 @@ def _set_password_action(self, event: ActionEvent) -> None: event.fail(msg) return - if not self.charm.upgrade.idle: - msg = f"Cannot set password while upgrading (upgrade_stack: {self.charm.upgrade.upgrade_stack})" + if not self.dependent.upgrade.idle: + msg = f"Cannot set password while upgrading (upgrade_stack: {self.dependent.upgrade.upgrade_stack})" logger.error(msg) event.fail(msg) return - if not self.charm.healthy: + if not self.dependent.healthy: msg = "Unit is not healthy" logger.error(msg) event.fail(msg) @@ -71,7 +73,7 @@ def _set_password_action(self, event: ActionEvent) -> None: return try: - self.charm.auth_manager.add_user( + self.dependent.auth_manager.add_user( username=username, password=new_password, zk_auth=True ) except Exception as e: @@ -92,7 +94,9 @@ def _get_admin_credentials_action(self, event: ActionEvent) -> None: event.fail(msg) return - admin_properties = set(client_properties) - set(self.charm.config_manager.tls_properties) + admin_properties = set(client_properties) - set( + self.dependent.config_manager.tls_properties + ) event.set_results( { diff --git a/src/events/peer_cluster.py b/src/events/peer_cluster.py new file mode 100644 index 00000000..ccb92170 --- /dev/null +++ b/src/events/peer_cluster.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""KafkaProvider class and methods.""" + +import json +import logging +from typing import TYPE_CHECKING + +from charms.data_platform_libs.v0.data_interfaces import ( + PROV_SECRET_PREFIX, + REQ_SECRET_FIELDS, + CachedSecret, + Data, + diff, + set_encoded_field, +) +from ops.charm import RelationChangedEvent, RelationCreatedEvent, RelationEvent, SecretChangedEvent +from ops.framework import Object + +from core.cluster import custom_secret_groups +from literals import ( + BALANCER, + BROKER, + PEER_CLUSTER_ORCHESTRATOR_RELATION, + PEER_CLUSTER_RELATION, +) + +if TYPE_CHECKING: + from charm import KafkaCharm + +logger = logging.getLogger(__name__) + + +class PeerClusterEventsHandler(Object): + """Implements the broker provider-side logic for peer-cluster relations.""" + + def __init__(self, charm: "KafkaCharm") -> None: + super().__init__(charm, "peer_cluster") + self.charm: "KafkaCharm" = charm + + self.framework.observe( + self.charm.on.secret_changed, + self._on_secret_changed_event, + ) + + for relation_name in [PEER_CLUSTER_RELATION, PEER_CLUSTER_ORCHESTRATOR_RELATION]: + self.framework.observe( + self.charm.on[relation_name].relation_created, + self._on_peer_cluster_created, + ) + + self.framework.observe( + self.charm.on[PEER_CLUSTER_RELATION].relation_changed, self._on_peer_cluster_changed + ) + self.framework.observe( + self.charm.on[PEER_CLUSTER_ORCHESTRATOR_RELATION].relation_changed, + self._on_peer_cluster_orchestrator_changed, + ) + + # ensures data updates, eventually + self.framework.observe( + getattr(self.charm.on, "update_status"), self._on_peer_cluster_changed + ) + + def _on_secret_changed_event(self, _: SecretChangedEvent) -> None: + pass + + def _on_peer_cluster_created(self, event: RelationCreatedEvent) -> None: + """Generic handler for peer-cluster `relation-created` events.""" + if not self.charm.unit.is_leader() or not event.relation.app: + return + + requested_secrets = ( + BALANCER.requested_secrets + if self.charm.state.runs_balancer + else BROKER.requested_secrets + ) or [] + + # request secrets for the relation + set_encoded_field( + event.relation, + self.charm.state.cluster.app, + REQ_SECRET_FIELDS, + requested_secrets, + ) + + # explicitly update the roles early, as we can't determine which model to instantiate + # until both applications have roles set + event.relation.data[self.charm.state.cluster.app].update({"roles": self.charm.state.roles}) + + def _on_peer_cluster_changed(self, event: RelationChangedEvent) -> None: + """Generic handler for peer-cluster `relation-changed` events.""" + if ( + not self.charm.unit.is_leader() + or not self.charm.state.runs_broker + or "balancer" not in self.charm.state.balancer.roles + ): + return + + self._default_relation_changed(event) + + # will no-op if relation does not exist + self.charm.state.balancer.update( + { + "roles": self.charm.state.roles, + "broker-username": self.charm.state.balancer.broker_username, + "broker-password": self.charm.state.balancer.broker_password, + "broker-uris": self.charm.state.balancer.broker_uris, + "racks": str(self.charm.state.balancer.racks), + "broker-capacities": json.dumps(self.charm.state.balancer.broker_capacities), + "zk-uris": self.charm.state.balancer.zk_uris, + "zk-username": self.charm.state.balancer.zk_username, + "zk-password": self.charm.state.balancer.zk_password, + } + ) + + self.charm.on.config_changed.emit() # ensure both broker+balancer get a changed event + + def _on_peer_cluster_orchestrator_changed(self, event: RelationChangedEvent) -> None: + """Generic 
handler for peer-cluster-orchestrator `relation-changed` events.""" + if not self.charm.unit.is_leader() or not self.charm.state.runs_balancer: + return + + self._default_relation_changed(event) + + for peer_cluster in self.charm.state.peer_clusters: + if "broker" not in peer_cluster.roles: + # TODO: maybe a log here? + continue + + # will no-op if relation does not exist + peer_cluster.update( + { + "balancer-username": self.charm.state.balancer.balancer_username, + "balancer-password": self.charm.state.balancer.balancer_password, + "balancer-uris": self.charm.state.balancer.balancer_uris, + } + ) + + self.charm.on.config_changed.emit() # ensure both broker+balancer get a changed event + + def _default_relation_changed(self, event: RelationChangedEvent): + """Implements required logic from multiple 'handled' events from the `data-interfaces` library.""" + if not isinstance(event, RelationEvent) or not event.relation or not event.relation.app: + return + + diff_data = diff(event, self.charm.state.cluster.app) + + if any(newval for newval in diff_data.added if newval.startswith(PROV_SECRET_PREFIX)): + for group in custom_secret_groups.groups(): + secret_field = f"{PROV_SECRET_PREFIX}{group}" + if secret_field in diff_data.added and ( + secret_uri := event.relation.data[event.relation.app].get(secret_field) + ): + label = Data._generate_secret_label( + event.relation.name, event.relation.id, group + ) + CachedSecret( + self.charm.model, self.charm.state.cluster.app, label, secret_uri + ).meta diff --git a/src/events/provider.py b/src/events/provider.py index 93c1ab2a..1cfcca19 100644 --- a/src/events/provider.py +++ b/src/events/provider.py @@ -20,6 +20,7 @@ if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -27,9 +28,11 @@ class KafkaProvider(Object): """Implements the provider-side logic for client applications relating to Kafka.""" - def __init__(self, charm) -> None: - super().__init__(charm, "kafka_client") - self.charm: "KafkaCharm" = charm + def __init__(self, dependent: "BrokerOperator") -> None: + super().__init__(dependent, "kafka_client") + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm + self.kafka_provider = KafkaProviderEventHandlers( self.charm, self.charm.state.client_provider_interface ) @@ -43,12 +46,12 @@ def __init__(self, charm) -> None: def on_topic_requested(self, event: TopicRequestedEvent): """Handle the on topic requested event.""" - if not self.charm.healthy: + if not self.dependent.healthy: event.defer() return # on all unit update the server properties to enable client listener if needed - self.charm._on_config_changed(event) + self.dependent._on_config_changed(event) if not self.charm.unit.is_leader() or not self.charm.state.peer_relation: return @@ -67,7 +70,7 @@ def on_topic_requested(self, event: TopicRequestedEvent): # catching error here in case listeners not established for bootstrap-server auth try: - self.charm.auth_manager.add_user( + self.dependent.auth_manager.add_user( username=client.username, password=password, ) @@ -79,7 +82,7 @@ def on_topic_requested(self, event: TopicRequestedEvent): # non-leader units need cluster_config_changed event to update their super.users self.charm.state.cluster.update({client.username: password}) - self.charm.auth_manager.update_user_acls( + self.dependent.auth_manager.update_user_acls( username=client.username, topic=client.topic, extra_user_roles=client.extra_user_roles, @@ -89,11 +92,11 @@ def 
on_topic_requested(self, event: TopicRequestedEvent): # non-leader units need cluster_config_changed event to update their super.users self.charm.state.cluster.update({"super-users": self.charm.state.super_users}) - self.charm.update_client_data() + self.dependent.update_client_data() def _on_relation_created(self, event: RelationCreatedEvent) -> None: """Handler for `kafka-client-relation-created` event.""" - self.charm._on_config_changed(event) + self.dependent._on_config_changed(event) def _on_relation_broken(self, event: RelationBrokenEvent) -> None: """Handler for `kafka-client-relation-broken` event. @@ -111,18 +114,18 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: ): return - if not self.charm.healthy: + if not self.dependent.healthy: event.defer() return if event.relation.app != self.charm.app or not self.charm.app.planned_units() == 0: username = f"relation-{event.relation.id}" - self.charm.auth_manager.remove_all_user_acls(username=username) - self.charm.auth_manager.delete_user(username=username) + self.dependent.auth_manager.remove_all_user_acls(username=username) + self.dependent.auth_manager.delete_user(username=username) # non-leader units need cluster_config_changed event to update their super.users # update on the peer relation data will trigger an update of server properties on all units self.charm.state.cluster.update({username: ""}) - self.charm.update_client_data() + self.dependent.update_client_data() diff --git a/src/events/tls.py b/src/events/tls.py index 41daef77..86dc972a 100644 --- a/src/events/tls.py +++ b/src/events/tls.py @@ -39,7 +39,7 @@ class TLSHandler(Object): """Handler for managing the client and unit TLS keys/certs.""" - def __init__(self, charm): + def __init__(self, charm: "KafkaCharm") -> None: super().__init__(charm, "tls") self.charm: "KafkaCharm" = charm @@ -118,7 +118,8 @@ def _tls_relation_broken(self, _) -> None: ) # remove all existing keystores from the unit so we don't preserve certs - self.charm.tls_manager.remove_stores() + self.charm.broker.tls_manager.remove_stores() + self.charm.balancer.tls_manager.remove_stores() if not self.charm.unit.is_leader(): return @@ -148,7 +149,7 @@ def _trusted_relation_joined(self, event: RelationJoinedEvent) -> None: event.defer() return - alias = self.charm.tls_manager.generate_alias( + alias = self.charm.broker.tls_manager.generate_alias( app_name=event.app.name, relation_id=event.relation.id, ) @@ -189,7 +190,7 @@ def _trusted_relation_changed(self, event: RelationChangedEvent) -> None: event.defer() return - alias = self.charm.tls_manager.generate_alias( + alias = self.charm.broker.tls_manager.generate_alias( event.relation.app.name, event.relation.id, ) @@ -204,7 +205,7 @@ def _trusted_relation_changed(self, event: RelationChangedEvent) -> None: self.charm.workload.write( content=content, path=f"{self.charm.workload.paths.conf_path}/{filename}" ) - self.charm.tls_manager.import_cert(alias=f"{alias}", filename=filename) + self.charm.broker.tls_manager.import_cert(alias=f"{alias}", filename=filename) # ensuring new config gets applied self.charm.on[f"{self.charm.restart.name}"].acquire_lock.emit() @@ -221,13 +222,13 @@ def _trusted_relation_broken(self, event: RelationBrokenEvent) -> None: return # All units will need to remove the cert from their truststore - alias = self.charm.tls_manager.generate_alias( + alias = self.charm.broker.tls_manager.generate_alias( app_name=event.relation.app.name, relation_id=event.relation.id, ) logger.info(f"Removing {alias=} from truststore...") 
- self.charm.tls_manager.remove_cert(alias=alias) + self.charm.broker.tls_manager.remove_cert(alias=alias) # The leader will also handle removing the "mtls" flag if needed if not self.charm.unit.is_leader(): @@ -261,11 +262,12 @@ def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: {"certificate": event.certificate, "ca-cert": event.ca, "ca": ""} ) - self.charm.tls_manager.set_server_key() - self.charm.tls_manager.set_ca() - self.charm.tls_manager.set_certificate() - self.charm.tls_manager.set_truststore() - self.charm.tls_manager.set_keystore() + for dependent in ["broker", "balancer"]: + getattr(self.charm, dependent).tls_manager.set_server_key() + getattr(self.charm, dependent).tls_manager.set_ca() + getattr(self.charm, dependent).tls_manager.set_certificate() + getattr(self.charm, dependent).tls_manager.set_truststore() + getattr(self.charm, dependent).tls_manager.set_keystore() # single-unit Kafka can lose restart events if it loses connection with TLS-enabled ZK self.charm.on.config_changed.emit() diff --git a/src/events/upgrade.py b/src/events/upgrade.py index aff5cc50..dc042859 100644 --- a/src/events/upgrade.py +++ b/src/events/upgrade.py @@ -4,6 +4,7 @@ """Manager for handling Kafka in-place upgrades.""" import logging +import subprocess import time from typing import TYPE_CHECKING @@ -14,11 +15,14 @@ UpgradeGrantedEvent, verify_requirements, ) +from charms.operator_libs_linux.v0.sysctl import CalledProcessError +from ops.pebble import ExecError from pydantic import BaseModel from typing_extensions import override if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -37,9 +41,10 @@ class KafkaDependencyModel(BaseModel): class KafkaUpgrade(DataUpgrade): """Implementation of :class:`DataUpgrade` overrides for in-place upgrades.""" - def __init__(self, charm: "KafkaCharm", **kwargs): - super().__init__(charm, **kwargs) - self.charm = charm + def __init__(self, dependent: "BrokerOperator", **kwargs) -> None: + super().__init__(dependent.charm, **kwargs) + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm @property def idle(self) -> bool: @@ -68,7 +73,7 @@ def post_upgrade_check(self) -> None: @override def pre_upgrade_check(self) -> None: default_message = "Pre-upgrade check failed and cannot safely upgrade" - if not self.charm.healthy: + if not self.dependent.healthy: raise ClusterNotReadyError(message=default_message, cause="Cluster is not healthy") @override @@ -99,17 +104,23 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: self.set_unit_failed() return - self.charm.workload.stop() + self.charm.broker.workload.stop() + try: + self.charm.balancer.workload.stop() + except CalledProcessError: + # cruise control added in charmed-kafka Rev.37 + pass - if not self.charm.workload.install(): + if not self.dependent.workload.install(): logger.error("Unable to install Snap") self.set_unit_failed() return - self.charm.config_manager.set_environment() + self.dependent.config_manager.set_environment() + self.apply_backwards_compatibility_fixes(event) logger.info(f"{self.charm.unit.name} upgrading service...") - self.charm.workload.restart() + self.dependent.workload.restart() # Allow for some time to settle down # FIXME: This logic should be improved as part of ticket DPE-3155 @@ -132,3 +143,22 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: except ClusterNotReadyError as e: logger.error(e.cause) self.set_unit_failed() + + def 
apply_backwards_compatibility_fixes(self, event) -> None: + """A range of functions needed for backwards compatibility.""" + logger.info("Applying upgrade fixes") + # Rev.38 - Create credentials for missing internal user, to reconcile state during upgrades + if ( + not self.charm.state.cluster.internal_user_credentials + and self.charm.state.zookeeper.zookeeper_connected + ): + try: + internal_user_credentials = self.dependent.zookeeper._create_internal_credentials() + except (KeyError, RuntimeError, subprocess.CalledProcessError, ExecError) as e: + logger.warning(str(e)) + event.defer() + return + + # only set to relation data when all set + for username, password in internal_user_credentials: + self.charm.state.cluster.update({f"{username}-password": password}) diff --git a/src/events/zookeeper.py b/src/events/zookeeper.py index e9f93f38..b632891c 100644 --- a/src/events/zookeeper.py +++ b/src/events/zookeeper.py @@ -12,10 +12,11 @@ from ops import Object, RelationChangedEvent, RelationEvent from ops.pebble import ExecError -from literals import INTERNAL_USERS, ZK, Status +from literals import INTERNAL_USERS, STORAGE, ZK, Status if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -23,9 +24,11 @@ class ZooKeeperHandler(Object): """Implements the provider-side logic for client applications relating to Kafka.""" - def __init__(self, charm) -> None: - super().__init__(charm, "zookeeper_client") - self.charm: "KafkaCharm" = charm + def __init__(self, dependent: "BrokerOperator") -> None: + super().__init__(dependent, "zookeeper_client") + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm + self.zookeeper_requires = DatabaseRequirerEventHandlers( self.charm, self.charm.state.zookeeper_requires_interface ) @@ -67,8 +70,8 @@ def _on_zookeeper_changed(self, event: RelationChangedEvent) -> None: if not self.charm.state.cluster.internal_user_credentials and self.model.unit.is_leader(): # loading the minimum config needed to authenticate to zookeeper - self.charm.config_manager.set_zk_jaas_config() - self.charm.config_manager.set_server_properties() + self.dependent.config_manager.set_zk_jaas_config() + self.dependent.config_manager.set_server_properties() try: internal_user_credentials = self._create_internal_credentials() @@ -83,14 +86,14 @@ def _on_zookeeper_changed(self, event: RelationChangedEvent) -> None: # attempt re-start of Kafka for all units on zookeeper-changed # avoids relying on deferred events elsewhere that may not exist after cluster init - if not self.charm.healthy and self.charm.state.cluster.internal_user_credentials: + if not self.dependent.healthy and self.charm.state.cluster.internal_user_credentials: self.charm.on.start.emit() self.charm.on.config_changed.emit() def _on_zookeeper_broken(self, _: RelationEvent) -> None: """Handler for `zookeeper_relation_broken` event, ensuring charm blocks.""" - self.charm.workload.stop() + self.dependent.workload.stop() logger.info(f'Broker {self.model.unit.name.split("/")[1]} disconnected') self.charm._set_status(Status.ZK_NOT_RELATED) @@ -98,8 +101,8 @@ def _on_zookeeper_broken(self, _: RelationEvent) -> None: # Kafka keeps a meta.properties in every log.dir with a unique ClusterID # this ID is provided by ZK, and removing it on relation-broken allows # re-joining to another ZK cluster. 
- for storage in self.charm.model.storages["data"]: - self.charm.workload.exec(f"rm {storage.location}/meta.properties") + for storage in self.charm.model.storages[STORAGE]: + self.dependent.workload.exec(["rm", f"{storage.location}/meta.properties"]) if not self.charm.unit.is_leader(): return @@ -121,9 +124,11 @@ def _create_internal_credentials(self) -> list[tuple[str, str]]: subprocess.CalledProcessError if command to ZooKeeper failed """ credentials = [ - (username, self.charm.workload.generate_password()) for username in INTERNAL_USERS + (username, self.dependent.workload.generate_password()) for username in INTERNAL_USERS ] for username, password in credentials: - self.charm.auth_manager.add_user(username=username, password=password, zk_auth=True) + self.dependent.auth_manager.add_user( + username=username, password=password, zk_auth=True + ) return credentials diff --git a/src/health.py b/src/health.py index 465b0ddf..1694e690 100644 --- a/src/health.py +++ b/src/health.py @@ -16,6 +16,7 @@ if TYPE_CHECKING: from charm import KafkaCharm + from events.broker import BrokerOperator logger = logging.getLogger(__name__) @@ -23,47 +24,56 @@ class KafkaHealth(Object): """Manager for handling Kafka machine health.""" - def __init__(self, charm) -> None: - super().__init__(charm, "kafka_health") - self.charm: "KafkaCharm" = charm + def __init__(self, dependent: "BrokerOperator") -> None: + super().__init__(dependent, "kafka_health") + self.dependent = dependent + self.charm: "KafkaCharm" = dependent.charm @property def _service_pid(self) -> int: """Gets most recent Kafka service pid from the snap logs.""" - return self.charm.workload.get_service_pid() + return self.dependent.workload.get_service_pid() def _get_current_memory_maps(self) -> int: """Gets the current number of memory maps for the Kafka process.""" - return int(self.charm.workload.exec(f"cat /proc/{self._service_pid}/maps | wc -l")) + return int( + self.dependent.workload.exec( + ["bash", "-c", f"cat /proc/{self._service_pid}/maps | wc -l"] + ) + ) def _get_current_max_files(self) -> int: """Gets the current file descriptor limit for the Kafka process.""" return int( - self.charm.workload.exec( - rf"cat /proc/{self._service_pid}/limits | grep files | awk '{{print $5}}'" + self.dependent.workload.exec( + [ + "bash", + "-c", + rf"cat /proc/{self._service_pid}/limits | grep files | awk '{{print $5}}'", + ] ) ) def _get_max_memory_maps(self) -> int: """Gets the current memory map limit for the machine.""" - return int(self.charm.workload.exec("sysctl -n vm.max_map_count")) + return int(self.dependent.workload.exec(["sysctl", "-n", "vm.max_map_count"])) def _get_vm_swappiness(self) -> int: """Gets the current vm.swappiness configured for the machine.""" - return int(self.charm.workload.exec("sysctl -n vm.swappiness")) + return int(self.dependent.workload.exec(["sysctl", "-n", "vm.swappiness"])) def _get_partitions_size(self) -> tuple[int, int]: """Gets the number of partitions and their average size from the log dirs.""" log_dirs_command = [ "--describe", f"--bootstrap-server {self.charm.state.bootstrap_server}", - f"--command-config {self.charm.workload.paths.client_properties}", + f"--command-config {self.dependent.workload.paths.client_properties}", ] try: - log_dirs = self.charm.workload.run_bin_command( + log_dirs = self.dependent.workload.run_bin_command( bin_keyword="log-dirs", bin_args=log_dirs_command, - opts=[self.charm.config_manager.tools_log4j_opts], + opts=[self.dependent.config_manager.tools_log4j_opts], ) except 
subprocess.CalledProcessError: return (0, 0) @@ -112,7 +122,7 @@ def _check_memory_maps(self) -> bool: def _check_file_descriptors(self) -> bool: """Checks that the number of used file descriptors is not approaching threshold.""" - if not self.charm.config_manager.client_listeners: + if not self.dependent.config_manager.client_listeners: return True total_partitions, average_partition_size = self._get_partitions_size() @@ -144,7 +154,7 @@ def _check_vm_swappiness(self) -> bool: def _check_total_memory(self) -> bool: """Checks that the total available memory is sufficient for desired profile.""" - if not (meminfo := self.charm.workload.read(path="/proc/meminfo")): + if not (meminfo := self.dependent.workload.read(path="/proc/meminfo")): return False total_memory_gb = int(meminfo[0].split()[1]) / 1000000 diff --git a/src/literals.py b/src/literals.py index 1e9e154f..84d659c4 100644 --- a/src/literals.py +++ b/src/literals.py @@ -12,12 +12,10 @@ CHARM_KEY = "kafka" SNAP_NAME = "charmed-kafka" -CHARMED_KAFKA_SNAP_REVISION = 37 +CHARMED_KAFKA_SNAP_REVISION = 39 CONTAINER = "kafka" SUBSTRATE = "vm" - -USER = "snap_daemon" -GROUP = "root" +STORAGE = "data" # FIXME: these need better names PEER = "cluster" @@ -27,11 +25,22 @@ TLS_RELATION = "certificates" TRUSTED_CERTIFICATE_RELATION = "trusted-certificate" TRUSTED_CA_RELATION = "trusted-ca" +PEER_CLUSTER_RELATION = "peer-cluster" +PEER_CLUSTER_ORCHESTRATOR_RELATION = "peer-cluster-orchestrator" +BALANCER_TOPICS = [ + "__CruiseControlMetrics", + "__KafkaCruiseControlPartitionMetricSamples", + "__KafkaCruiseControlBrokerMetricSamples", +] +MIN_REPLICAS = 3 + INTER_BROKER_USER = "sync" ADMIN_USER = "admin" INTERNAL_USERS = [INTER_BROKER_USER, ADMIN_USER] -SECRETS_APP = [f"{user}-password" for user in INTERNAL_USERS] +BALANCER_WEBSERVER_USER = "balancer" +BALANCER_WEBSERVER_PORT = 9090 +SECRETS_APP = [f"{user}-password" for user in INTERNAL_USERS + [BALANCER_WEBSERVER_USER]] SECRETS_UNIT = [ "ca-cert", "csr", @@ -42,6 +51,7 @@ ] JMX_EXPORTER_PORT = 9101 +JMX_CC_PORT = 9102 METRICS_RULES_DIR = "./src/alert_rules/prometheus" LOGS_RULES_DIR = "./src/alert_rules/loki" @@ -70,11 +80,20 @@ "vm.dirty_background_ratio": "5", } + PATHS = { - "CONF": f"/var/snap/{SNAP_NAME}/current/etc/kafka", - "LOGS": f"/var/snap/{SNAP_NAME}/common/var/log/kafka", - "DATA": f"/var/snap/{SNAP_NAME}/common/var/lib/kafka", - "BIN": f"/snap/{SNAP_NAME}/current/opt/kafka", + "kafka": { + "CONF": f"/var/snap/{SNAP_NAME}/current/etc/kafka", + "LOGS": f"/var/snap/{SNAP_NAME}/common/var/log/kafka", + "DATA": f"/var/snap/{SNAP_NAME}/common/var/lib/kafka", + "BIN": f"/snap/{SNAP_NAME}/current/opt/kafka", + }, + "cruise-control": { + "CONF": f"/var/snap/{SNAP_NAME}/current/etc/cruise-control", + "LOGS": f"/var/snap/{SNAP_NAME}/common/var/log/cruise-control", + "DATA": f"/var/snap/{SNAP_NAME}/common/var/lib/cruise-control", + "BIN": f"/snap/{SNAP_NAME}/current/opt/cruise-control", + }, } @@ -95,6 +114,77 @@ class Ports: } +@dataclass +class Role: + value: str + service: str + paths: dict[str, str] + relation: str + requested_secrets: list[str] | None = None + + def __eq__(self, value: object, /) -> bool: + """Provide an easy comparison to the configuration key.""" + return self.value == value + + +BROKER = Role( + value="broker", + service="daemon", + paths=PATHS["kafka"], + relation=PEER_CLUSTER_RELATION, + requested_secrets=[ + "balancer-username", + "balancer-password", + "balancer-uris", + ], +) +BALANCER = Role( + value="balancer", + service="cruise-control", + 
paths=PATHS["cruise-control"], + relation=PEER_CLUSTER_ORCHESTRATOR_RELATION, + requested_secrets=[ + "broker-username", + "broker-password", + "broker-uris", + "zk-username", + "zk-password", + "zk-uris", + ], +) + +DEFAULT_BALANCER_GOALS = [ + "ReplicaCapacity", + "DiskCapacity", + "NetworkInboundCapacity", + "NetworkOutboundCapacity", + "CpuCapacity", + "ReplicaDistribution", + "PotentialNwOut", + "DiskUsageDistribution", + "NetworkInboundUsageDistribution", + "NetworkOutboundUsageDistribution", + "CpuUsageDistribution", + "LeaderReplicaDistribution", + "LeaderBytesInDistribution", + "TopicReplicaDistribution", + "PreferredLeaderElection", +] +HARD_BALANCER_GOALS = [ + "ReplicaCapacity", + "DiskCapacity", + "NetworkInboundCapacity", + "NetworkOutboundCapacity", + "CpuCapacity", + "ReplicaDistribution", +] + + +MODE_FULL = "full" +MODE_ADD = "add" +MODE_REMOVE = "remove" + + @dataclass class StatusLevel: """Status object helper.""" @@ -108,8 +198,17 @@ class Status(Enum): ACTIVE = StatusLevel(ActiveStatus(), "DEBUG") NO_PEER_RELATION = StatusLevel(MaintenanceStatus("no peer relation yet"), "DEBUG") + NO_PEER_CLUSTER_RELATION = StatusLevel( + BlockedStatus("missing required peer-cluster relation"), "DEBUG" + ) SNAP_NOT_INSTALLED = StatusLevel(BlockedStatus(f"unable to install {SNAP_NAME} snap"), "ERROR") - SNAP_NOT_RUNNING = StatusLevel(BlockedStatus("snap service not running"), "WARNING") + BROKER_NOT_RUNNING = StatusLevel( + BlockedStatus(f"{SNAP_NAME}.{BROKER.service} snap service not running"), "WARNING" + ) + NOT_ALL_RELATED = StatusLevel(MaintenanceStatus("not all units related"), "DEBUG") + CC_NOT_RUNNING = StatusLevel( + BlockedStatus(f"{SNAP_NAME}.{BALANCER.service} snap service not running"), "WARNING" + ) ZK_NOT_RELATED = StatusLevel(BlockedStatus("missing required zookeeper relation"), "DEBUG") ZK_NOT_CONNECTED = StatusLevel(BlockedStatus("unit not connected to zookeeper"), "ERROR") ZK_TLS_MISMATCH = StatusLevel( @@ -142,6 +241,18 @@ class Status(Enum): BlockedStatus("sysctl params cannot be set. Is the machine running on a container?"), "WARNING", ) + NOT_IMPLEMENTED = StatusLevel( + BlockedStatus("feature not yet implemented"), + "WARNING", + ) + NO_BALANCER_RELATION = StatusLevel(MaintenanceStatus("no balancer relation yet"), "DEBUG") + NO_BROKER_DATA = StatusLevel(MaintenanceStatus("missing broker data"), "DEBUG") + NOT_ENOUGH_BROKERS = StatusLevel( + WaitingStatus(f"waiting for {MIN_REPLICAS} online brokers"), "DEBUG" + ) + WAITING_FOR_REBALANCE = StatusLevel( + WaitingStatus("awaiting completion of rebalance task"), "DEBUG" + ) DEPENDENCIES = { diff --git a/src/managers/balancer.py b/src/managers/balancer.py new file mode 100644 index 00000000..00d172ac --- /dev/null +++ b/src/managers/balancer.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Manager for handling Balancer.""" + +import json +import logging +import time +from typing import TYPE_CHECKING, Any + +import requests + +from core.models import JSON +from literals import BALANCER, BALANCER_TOPICS, MODE_FULL, STORAGE + +if TYPE_CHECKING: + from charm import KafkaCharm + from events.balancer import BalancerOperator + from events.broker import BrokerOperator + + +logger = logging.getLogger(__name__) + + +class CruiseControlClient: + """Client wrapper for CruiseControl.""" + + def __init__(self, username: str, password: str, host: str = "localhost", port: int = 9090): + self.username = username + self.password = password + self.address = f"http://{host}:{port}/kafkacruisecontrol" + self.default_params = {"json": "True"} + + def get(self, endpoint: str, **kwargs) -> requests.Response: + """CruiseControl GET request. + + Args: + endpoint: the REST API endpoint to GET. + e.g `state`, `load`, `user_tasks` + **kwargs: any REST API query parameters provided by that endpoint + """ + r = requests.get( + url=f"{self.address}/{endpoint}", + auth=(self.username, self.password), + params=kwargs | self.default_params, + ) + logger.debug(f"GET {endpoint} - {vars(r)}") + + return r + + def post(self, endpoint: str, dryrun: bool = False, **kwargs) -> requests.Response: + """CruiseControl POST request. + + Args: + endpoint: the REST API endpoint to POST. + e.g `add_broker`, `demote_broker`, `rebalance` + dryrun: flag to decide whether to return only proposals (True), or execute (False) + **kwargs: any REST API query parameters provided by that endpoint + """ + payload = {"dryrun": str(dryrun)} + if brokerid := kwargs.get("brokerid", None) is not None: + payload |= {"brokerid": brokerid} + + r = requests.post( + url=f"{self.address}/{endpoint}", + auth=(self.username, self.password), + params=kwargs | payload | self.default_params, + ) + logger.debug(f"POST {endpoint} - {vars(r)}") + + return r + + def get_task_status(self, user_task_id: str) -> str: + """Gets the task status from the `user_tasks` API endpoint for the provided `user_task_id`. 
+
+        Returns:
+            The status of the task
+                e.g. 'Completed', 'CompletedWithError', 'Active'
+        """
+        for task in self.get(endpoint="user_tasks").json().get("userTasks", []):
+            if task.get("UserTaskId", "") == user_task_id:
+                return task.get("Status", "")
+
+        return ""
+
+    @property
+    def monitoring(self) -> bool:
+        """Flag to confirm that the CruiseControl Monitor is up-and-running."""
+        return (
+            self.get(endpoint="state", verbose="True")
+            .json()
+            .get("MonitorState", {})
+            .get("state", "")
+            == "RUNNING"
+        )
+
+    @property
+    def executing(self) -> bool:
+        """Flag to confirm that the CruiseControl Executor is currently executing a task."""
+        return (
+            self.get(endpoint="state", verbose="True")
+            .json()
+            .get("ExecutorState", {})
+            .get("state", "")
+            != "NO_TASK_IN_PROGRESS"
+        )
+
+    @property
+    def ready(self) -> bool:
+        """Flag to confirm that the CruiseControl Analyzer is ready to generate proposals."""
+        monitor_state = self.get(endpoint="state", verbose="True").json().get("MonitorState", {})
+        return all(
+            [
+                monitor_state.get("numMonitoredWindows", 0),
+                monitor_state.get("numValidPartitions", 0),
+            ]
+        )
+
+
+class BalancerManager:
+    """Manager for handling Balancer."""
+
+    def __init__(self, dependent: "BrokerOperator | BalancerOperator") -> None:
+        self.dependent = dependent
+        self.charm: "KafkaCharm" = dependent.charm
+
+    @property
+    def cruise_control(self) -> CruiseControlClient:
+        """Client for the CruiseControl REST API."""
+        return CruiseControlClient(
+            username=self.charm.state.balancer.balancer_username,
+            password=self.charm.state.balancer.balancer_password,
+        )
+
+    @property
+    def cores(self) -> int:
+        """Gets the total number of CPU cores for the machine."""
+        return int(self.dependent.workload.exec(["nproc", "--all"]))
+
+    @property
+    def storages(self) -> str:
+        """A JSON string mapping each unit storage path to its storage size."""
+        return json.dumps(
+            {
+                str(storage.location): str(
+                    self._get_storage_size(path=storage.location.absolute().as_posix())
+                )
+                for storage in self.charm.model.storages[STORAGE]
+            }
+        )
+
+    def create_internal_topics(self) -> None:
+        """Create Cruise Control topics."""
+        bootstrap_servers = self.charm.state.balancer.broker_uris
+        property_file = f'{BALANCER.paths["CONF"]}/cruisecontrol.properties'
+
+        for topic in BALANCER_TOPICS:
+            if topic not in self.dependent.workload.run_bin_command(
+                "topics",
+                [
+                    "--list",
+                    "--bootstrap-server",
+                    bootstrap_servers,
+                    "--command-config",
+                    property_file,
+                ],
+            ):
+                self.dependent.workload.run_bin_command(
+                    "topics",
+                    [
+                        "--create",
+                        "--topic",
+                        topic,
+                        "--bootstrap-server",
+                        bootstrap_servers,
+                        "--command-config",
+                        property_file,
+                    ],
+                )
+                logger.info(f"Created topic {topic}")
+
+    def rebalance(
+        self, mode: str, dryrun: bool = True, brokerid: int | None = None
+    ) -> tuple[requests.Response, str]:
+        """Triggers a Kafka cluster partition rebalance.
+ + Returns: + Tuple of requests.Response and string of the CruiseControl User-Task-ID for the rebalance + """ + mode = f"{mode}_broker" if mode != MODE_FULL else mode + rebalance_request = self.cruise_control.post( + endpoint=mode, dryrun=dryrun, brokerid=brokerid + ) + + return (rebalance_request, rebalance_request.headers.get("User-Task-ID", "")) + + def wait_for_task(self, user_task_id: str) -> None: + """Waits for the provided User-Task-ID to complete execution.""" + # block entire charm event handling while rebalance in progress + while ( + "Completed" not in self.cruise_control.get_task_status(user_task_id=user_task_id) + or self.cruise_control.executing + ): + logger.info(f"Waiting for task execution to finish for {user_task_id=}...") + time.sleep(10) # sleep needed as CC API rejects too many requests within a short time + + def _get_storage_size(self, path: str) -> int: + """Gets the total storage volume of a mounted filepath, in KB.""" + return int( + self.dependent.workload.exec(["bash", "-c", f"df --output=size {path} | sed 1d"]) + ) + + def _build_new_key(self, nested_key: str, nested_value: JSON) -> dict[str, JSON]: + """Builds a nested key:value pair for JSON lists from the output of a rebalance proposal. + + The keys where this is needed are `brokers`, `hosts` and `goalSummary` goals. Turns this: + ``` + "loadAfterOptimization": { + "brokers": [ + { + "BrokerState": "ALIVE", + "Broker": 0, + }, + { + "BrokerState": "ALIVE", + "Broker": 1, + }, + } + ``` + + into this: + + ``` + "loadAfterOptimization": { + "brokers.0": + { + "brokerstate": "ALIVE", + "broker": 0, + }, + "brokers.1": + { + "brokerstate": "ALIVE", + "broker": 1, + }, + } + ``` + + """ + mapping = {"brokers": "Broker", "hosts": "Host", "goalSummary": "goal"} + label_key = mapping.get(nested_key, "") + + if not (label_key and isinstance(nested_value, list)): + return {} + + nested_dict = {} + for item in nested_value: + if not isinstance(item, dict): + continue + + label_value = item.get(label_key) + clean_label_value = self._sanitise_key(label_value) + new_key = f"{nested_key}.{clean_label_value}".lower() + + nested_dict[new_key] = self.clean_results(item) # continue recursing + + return nested_dict + + def _sanitise_key(self, key: Any) -> Any: + """Sanitises keys for passing as Juju Actions results. 
+ + When calling `event.set_results(dict)`, the passed dict has some limitations: + - All keys must be lower-case with no special characters, must be similar to 'key', 'some-key2', or 'some.key' + - Non-string types will be forced in to a 'str()' shape + """ + if not isinstance(key, str): + return key + + return key.replace(".", "-").replace("_", "-").lower() + + def clean_results(self, value: JSON) -> JSON: + """Recursively cleans JSON responses returned from the CruiseControl API, for passing to Action results.""" + if isinstance(value, list): + return [self.clean_results(item) for item in value] + + elif isinstance(value, dict): + nested_dict = {} + for nested_key, nested_value in value.items(): + if new_key := self._build_new_key(nested_key, nested_value): + nested_dict.update(new_key) + else: + nested_dict[self._sanitise_key(nested_key)] = self.clean_results(nested_value) + + return nested_dict + + else: + return value diff --git a/src/managers/config.py b/src/managers/config.py index 3ff5950c..adf263b4 100644 --- a/src/managers/config.py +++ b/src/managers/config.py @@ -5,17 +5,25 @@ """Manager for handling Kafka configuration.""" import inspect +import json import logging import os import re import textwrap +from abc import abstractmethod +from typing import Iterable + +from typing_extensions import override from core.cluster import ClusterState from core.structured_config import CharmConfig, LogLevel from core.workload import WorkloadBase from literals import ( ADMIN_USER, + DEFAULT_BALANCER_GOALS, + HARD_BALANCER_GOALS, INTER_BROKER_USER, + JMX_CC_PORT, JMX_EXPORTER_PORT, JVM_MEM_MAX_GB, JVM_MEM_MIN_GB, @@ -32,8 +40,15 @@ authorizer.class.name=kafka.security.authorizer.AclAuthorizer allow.everyone.if.no.acl.found=false auto.create.topics.enable=false +metric.reporters=com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporter +""" +CRUISE_CONTROL_CONFIG_OPTIONS = """ +metric.reporter.topic=__CruiseControlMetrics +sample.store.class=com.linkedin.kafka.cruisecontrol.monitor.sampling.KafkaSampleStore +partition.metric.sample.store.topic=__KafkaCruiseControlPartitionMetricSamples +broker.metric.sample.store.topic=__KafkaCruiseControlModelTrainingSamples +max.active.user.tasks=10 """ - SERVER_PROPERTIES_BLACKLIST = ["profile", "log_level", "certificate_extra_sans"] @@ -100,20 +115,12 @@ def advertised_listener(self) -> str: return f"{self.name}://{self.host}:{self.port}" -class ConfigManager: - """Manager for handling Kafka configuration.""" +class CommonConfigManager: + """Common options for managing Kafka configuration.""" - def __init__( - self, - state: ClusterState, - workload: WorkloadBase, - config: CharmConfig, - current_version: str, - ): - self.state = state - self.workload = workload - self.config = config - self.current_version = current_version + config: CharmConfig + workload: WorkloadBase + state: ClusterState @property def log_level(self) -> str: @@ -129,7 +136,7 @@ def log_level(self) -> str: return f"KAFKA_CFG_LOGLEVEL={self.config.log_level}" @property - def jmx_opts(self) -> str: + def kafka_jmx_opts(self) -> str: """The JMX options for configuring the prometheus exporter. Returns: @@ -142,6 +149,20 @@ def jmx_opts(self) -> str: return f"KAFKA_JMX_OPTS='{' '.join(opts)}'" + @property + def cc_jmx_opts(self) -> str: + """The JMX options for configuring the prometheus exporter on cruise control. 
+
+        Returns:
+            String of JMX options
+        """
+        opts = [
+            "-Dcom.sun.management.jmxremote",
+            f"-javaagent:{self.workload.paths.jmx_prometheus_javaagent}={JMX_CC_PORT}:{self.workload.paths.jmx_cc_config}",
+        ]
+
+        return f"CC_JMX_OPTS='{' '.join(opts)}'"
+
     @property
     def tools_log4j_opts(self) -> str:
         """The Log4j options for configuring the tooling logging.
@@ -150,11 +171,31 @@
             String of Log4j options
         """
         opts = [
-            '-Dlog4j.configuration=file:{self.workload.paths.tools_log4j_properties} -Dcharmed.kafka.log.level={self.log_level.split("=")[1]}'
+            f'-Dlog4j.configuration=file:{self.workload.paths.tools_log4j_properties} -Dcharmed.kafka.log.level={self.log_level.split("=")[1]}'
         ]
 
         return f"KAFKA_LOG4J_OPTS='{' '.join(opts)}'"
 
+    @property
+    @abstractmethod
+    def kafka_opts(self) -> str:
+        """Extra Java config options.
+
+        Returns:
+            String of Java config options
+        """
+        ...
+
+    @property
+    @abstractmethod
+    def jaas_config(self) -> str:
+        """Builds the JAAS config for Client/KafkaClient authentication.
+
+        Returns:
+            String of JAAS config for ZooKeeper or Kafka authentication.
+        """
+        ...
+
     @property
     def jvm_performance_opts(self) -> str:
         """The JVM config options for tuning performance settings.
@@ -190,12 +231,34 @@ def heap_opts(self) -> str:
         return f"KAFKA_HEAP_OPTS='{' '.join(opts)}'"
 
     @property
-    def kafka_opts(self) -> str:
-        """Extra Java config options.
+    def security_protocol(self) -> AuthProtocol:
+        """Infers current charm security.protocol based on current relations."""
+        # FIXME: When we have multiple auth_mechanisms/listeners, remove this method
+        return (
+            "SASL_SSL"
+            if (self.state.cluster.tls_enabled and self.state.unit_broker.certificate)
+            else "SASL_PLAINTEXT"
+        )
 
-        Returns:
-            String of Java config options
-        """
+
+class ConfigManager(CommonConfigManager):
+    """Manager for handling Kafka configuration."""
+
+    def __init__(
+        self,
+        state: ClusterState,
+        workload: WorkloadBase,
+        config: CharmConfig,
+        current_version: str = "",
+    ):
+        self.state = state
+        self.workload = workload
+        self.config = config
+        self.current_version = current_version
+
+    @property
+    @override
+    def kafka_opts(self) -> str:
         opts = [
             f"-Djava.security.auth.login.config={self.workload.paths.zk_jaas}",
         ]
@@ -383,7 +446,11 @@ def internal_listener(self) -> Listener:
     def client_listeners(self) -> list[Listener]:
         """Return a list of extra listeners."""
         protocol_mechanism_dict: list[tuple[AuthProtocol, AuthMechanism]] = []
-        if self.state.client_relations:
+
+        related_clients = bool(self.state.client_relations)
+        balancer_involved = self.state.runs_balancer or self.state.peer_cluster_relation
+
+        if related_clients or balancer_involved:
             protocol_mechanism_dict.append((self.security_protocol, "SCRAM-SHA-512"))
         if self.state.oauth_relation:
             protocol_mechanism_dict.append((self.security_protocol, "OAUTHBEARER"))
@@ -423,34 +490,62 @@ def rack_properties(self) -> list[str]:
         Returns:
             List of properties to be set
         """
-        # TODO: not sure if we should make this an instance attribute like the other paths
         rack_path = f"{self.workload.paths.conf_path}/rack.properties"
         return self.workload.read(rack_path) or []
 
     @property
-    def client_properties(self) -> list[str]:
-        """Builds all properties necessary for running an admin Kafka client.
+    def rack(self) -> str:
+        """The rack for the current running unit, determined from a manually added `rack.properties`.
+
+        Returns:
+            String of broker.rack value.
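+            For example, "rack-1" when `rack.properties` contains `broker.rack=rack-1`
+            (illustrative value).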
+ """ + for item in self.rack_properties: + if "broker.rack" in item: + return item.split("=")[1] + + return "" + + def _build_internal_client_properties( + self, username: str, prefix: str | None = None + ) -> list[str]: + """Builds all properties necessary for running an internal Kafka client. This includes SASL/SCRAM auth and security mechanisms. + Args: + username: the username to set. Must be from `INTERNAL_USERS` + prefix: any prefix to assign to the properties to indicate a specific client + e.g `cruise.control.metrics.reporter` -> `cruise.control.metrics.reporter.bootstrap.servers` + Returns: - List of properties to be set + List of properties to be set on the Kafka broker """ - username = ADMIN_USER - password = self.state.cluster.internal_user_credentials.get(ADMIN_USER, "") + password = self.state.cluster.internal_user_credentials.get(username, "") - client_properties = [ + properties = [ f'sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="{username}" password="{password}";', "sasl.mechanism=SCRAM-SHA-512", f"security.protocol={self.security_protocol}", - # FIXME: security.protocol will need changing once multiple listener auth schemes f"bootstrap.servers={self.state.bootstrap_server}", ] if self.state.cluster.tls_enabled and self.state.unit_broker.certificate: - client_properties += self.tls_properties + properties += self.tls_properties + + return [f"{prefix}.{prop}" if prefix else prop for prop in properties] - return client_properties + @property + def client_properties(self) -> list[str]: + """Builds all properties necessary for running an admin Kafka client.""" + return self._build_internal_client_properties(username=ADMIN_USER) + + @property + def metrics_reporter_properties(self) -> list[str]: + """Builds all the properties necessary for running the CruiseControlMetricsReporter client.""" + return self._build_internal_client_properties( + username=ADMIN_USER, prefix="cruise.control.metrics.reporter" + ) @property def server_properties(self) -> list[str]: @@ -484,6 +579,7 @@ def server_properties(self) -> list[str]: + self.default_replication_properties + self.auth_properties + self.rack_properties + + self.metrics_reporter_properties + DEFAULT_CONFIG_OPTIONS.split("\n") ) @@ -502,12 +598,8 @@ def config_properties(self) -> list[str]: ] @property - def zk_jaas_config(self) -> str: - """Builds the JAAS config for Client authentication with ZooKeeper. 
- - Returns: - String of Jaas config for ZooKeeper auth - """ + @override + def jaas_config(self) -> str: return inspect.cleandoc( f""" Client {{ @@ -515,12 +607,12 @@ def zk_jaas_config(self) -> str: username="{self.state.zookeeper.username}" password="{self.state.zookeeper.password}"; }}; - """ + """ ) def set_zk_jaas_config(self) -> None: """Writes the ZooKeeper JAAS config using ZooKeeper relation data.""" - self.workload.write(content=self.zk_jaas_config, path=self.workload.paths.zk_jaas) + self.workload.write(content=self.jaas_config, path=self.workload.paths.zk_jaas) def set_server_properties(self) -> None: """Writes all Kafka config properties to the `server.properties` path.""" @@ -538,22 +630,12 @@ def set_environment(self) -> None: """Writes the env-vars needed for passing to charmed-kafka service.""" updated_env_list = [ self.kafka_opts, - self.jmx_opts, + self.kafka_jmx_opts, self.jvm_performance_opts, self.heap_opts, self.log_level, ] - def map_env(env: list[str]) -> dict[str, str]: - map_env = {} - for var in env: - key = "".join(var.split("=", maxsplit=1)[0]) - value = "".join(var.split("=", maxsplit=1)[1:]) - if key: - # only check for keys, as we can have an empty value for a variable - map_env[key] = value - return map_env - raw_current_env = self.workload.read("/etc/environment") current_env = map_env(raw_current_env) @@ -569,3 +651,208 @@ def _translate_config_key(key: str): String with Kafka configuration name to be placed in the server.properties file """ return key.replace("_", ".") if key not in SERVER_PROPERTIES_BLACKLIST else f"# {key}" + + +class BalancerConfigManager(CommonConfigManager): + """Manager for handling Balancer configuration.""" + + def __init__( + self, + state: ClusterState, + workload: WorkloadBase, + config: CharmConfig, + ): + self.state = state + self.workload = workload + self.config = config + + @property + @override + def kafka_opts(self) -> str: + opts = [ + f"-Djava.security.auth.login.config={self.workload.paths.balancer_jaas}", + ] + + return f"KAFKA_OPTS='{' '.join(opts)}'" + + @property + def balance_thresholds(self) -> list[str]: + """Properties for managing variance in inter-broker resource usage.""" + balance_threshold = self.config.cruisecontrol_balance_threshold + return [ + f"cpu.balance.threshold={balance_threshold}", + f"disk.balance.threshold={balance_threshold}", + f"network.inbound.balance.threshold={balance_threshold}", + f"network.outbound.balance.threshold={balance_threshold}", + f"replica.count.balance.threshold={balance_threshold}", + f"leader.replica.count.balance.threshold={balance_threshold}", + ] + + @property + def capacity_thresholds(self) -> list[str]: + """Properties for managing broker resource usage total capacity.""" + capacity_threshold = self.config.cruisecontrol_capacity_threshold + return [ + f"disk.capacity.threshold={capacity_threshold}", + f"cpu.capacity.threshold={capacity_threshold}", + f"network.inbound.capacity.threshold={capacity_threshold}", + f"network.outbound.capacity.threshold={capacity_threshold}", + ] + + @property + def goals(self) -> list[str]: + """Builds all pluggable Goals properties for CruiseControl. 
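+
+        Rack goal selection: when `min(3, broker count)` is greater than the number of racks
+        (i.e. the effective replication factor exceeds the racks available), the softer
+        RackAwareDistribution goal is used instead of strict RackAware.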
+ + Returns: + List of properties to be set + """ + goals = DEFAULT_BALANCER_GOALS + + if self.state.balancer.racks: + if ( + min([3, len(self.state.balancer.broker_capacities["brokerCapacities"])]) + > self.state.balancer.racks + ): # replication-factor > racks is not ideal + goals = goals + ["RackAwareDistribution"] + else: + goals = goals + ["RackAware"] + + default_goals = [ + f"com.linkedin.kafka.cruisecontrol.analyzer.goals.{goal}Goal" for goal in goals + ] + + return [ + f"default.goals={','.join(default_goals)}", + f"goals={','.join(default_goals)}", + f"hard.goals={','.join([goal for goal in default_goals if any(hard_goal in goal for hard_goal in HARD_BALANCER_GOALS)])}", + ] + + @property + def cc_zookeeper_tls_properties(self) -> list[str]: + """Builds the properties necessary for SSL connections to ZooKeeper. + + Returns: + List of properties to be set + """ + return [ + "zookeeper.ssl.client.enable=true", + f"zookeeper.ssl.truststore.location={self.workload.paths.truststore}", + f"zookeeper.ssl.truststore.password={self.state.unit_broker.truststore_password}", + "zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty", + ] + + @property + def cc_tls_properties(self) -> list[str]: + """Builds the properties necessary for TLS authentication. + + Returns: + List of properties to be set + """ + return [ + f"ssl.truststore.location={self.workload.paths.truststore}", + f"ssl.truststore.password={self.state.unit_broker.truststore_password}", + f"ssl.keystore.location={self.workload.paths.keystore}", + f"ssl.keystore.password={self.state.unit_broker.keystore_password}", + "ssl.client.auth=none", # TODO mTLS related. Will need changing if mTLS is introduced + ] + + @property + def cruise_control_properties(self) -> list[str]: + """Builds all properties necessary for starting Cruise Control service. 
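+
+        Covers broker and ZooKeeper connections with SASL/SCRAM auth, the broker capacity
+        file, webserver credentials, the static CRUISE_CONTROL_CONFIG_OPTIONS block and the
+        pluggable goals, plus TLS properties when enabled.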
+ + Returns: + List of properties to be set + """ + properties = ( + [ + f"bootstrap.servers={self.state.balancer.broker_uris}", + f"zookeeper.connect={self.state.balancer.zk_uris}", + "zookeeper.security.enabled=true", + f'sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="{self.state.balancer.broker_username}" password="{self.state.balancer.broker_password}";', + "sasl.mechanism=SCRAM-SHA-512", + f"security.protocol={self.security_protocol}", + f"capacity.config.file={self.workload.paths.capacity_jbod_json}", + "webserver.security.enable=true", + f"webserver.auth.credentials.file={self.workload.paths.cruise_control_auth}", + ] + + CRUISE_CONTROL_CONFIG_OPTIONS.split("\n") + + self.goals + ) + + if self.state.cluster.tls_enabled and self.state.unit_broker.certificate: + properties += self.cc_tls_properties + self.cc_zookeeper_tls_properties + + return properties + + @property + @override + def jaas_config(self) -> str: + return inspect.cleandoc( + f""" + Client {{ + org.apache.zookeeper.server.auth.DigestLoginModule required + username="{self.state.balancer.zk_username}" + password="{self.state.balancer.zk_password}"; + }}; + """ + ) + + def set_zk_jaas_config(self) -> None: + """Writes the ZooKeeper JAAS config using Balancer relation data.""" + self.workload.write(content=self.jaas_config, path=self.workload.paths.balancer_jaas) + + def set_cruise_control_properties(self) -> None: + """Writes all Cruise Control properties to the `cruisecontrol.properties` path.""" + self.workload.write( + content="\n".join(self.cruise_control_properties), + path=self.workload.paths.cruise_control_properties, + ) + + def set_broker_capacities(self) -> None: + """Writes all broker storage capacities to `capacityJBOD.json`.""" + self.workload.write( + content=json.dumps(self.state.balancer.broker_capacities), + path=self.workload.paths.capacity_jbod_json, + ) + + def set_environment(self) -> None: + """Writes the env-vars needed for passing to charmed-kafka service. + + We avoid overwriting KAFKA_OPTS in /etc/environment because it is only used by the broker. 
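+        Concretely, when this unit also runs a broker the file is not written at all, so the
+        broker's own KAFKA_OPTS (pointing at its ZooKeeper JAAS config) is preserved.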
+ """ + updated_env_list = [ + self.kafka_jmx_opts, + self.cc_jmx_opts, + self.jvm_performance_opts, + self.heap_opts, + self.log_level, + self.kafka_opts, + ] + + raw_current_env = self.workload.read("/etc/environment") + current_env = map_env(raw_current_env) + + updated_env = current_env | map_env(updated_env_list) + content = "\n".join([f"{key}={value}" for key, value in updated_env.items()]) + + if not self.state.runs_broker: + self.workload.write(content=content, path="/etc/environment") + + def set_cruise_control_auth(self) -> None: + """Write the credentials file for Cruise Control authentication.""" + self.workload.write( + content=f"{self.state.cluster.balancer_username}: {self.state.cluster.balancer_password},ADMIN\n", + path=self.workload.paths.cruise_control_auth, + ) + + +def map_env(env: Iterable[str]) -> dict[str, str]: + """Parse env var into a dict.""" + map_env = {} + for var in env: + key = "".join(var.split("=", maxsplit=1)[0]) + value = "".join(var.split("=", maxsplit=1)[1:]) + if key: + # only check for keys, as we can have an empty value for a variable + map_env[key] = value + return map_env diff --git a/src/managers/tls.py b/src/managers/tls.py index 2b950afc..526990ab 100644 --- a/src/managers/tls.py +++ b/src/managers/tls.py @@ -66,9 +66,9 @@ def set_truststore(self) -> None: """Adds CA to JKS truststore.""" command = f"{self.keytool} -import -v -alias ca -file ca.pem -keystore truststore.jks -storepass {self.state.unit_broker.truststore_password} -noprompt" try: - self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) - self.workload.exec(f"chown {USER}:{GROUP} {self.workload.paths.truststore}") - self.workload.exec(f"chmod 770 {self.workload.paths.truststore}") + self.workload.exec(command=command.split(), working_dir=self.workload.paths.conf_path) + self.workload.exec(["chown", f"{USER}:{GROUP}", f"{self.workload.paths.truststore}"]) + self.workload.exec(["chmod", "770", f"{self.workload.paths.truststore}"]) except (subprocess.CalledProcessError, ExecError) as e: # in case this reruns and fails if e.stdout and "already exists" in e.stdout: @@ -80,9 +80,9 @@ def set_keystore(self) -> None: """Creates and adds unit cert and private-key to the keystore.""" command = f"openssl pkcs12 -export -in server.pem -inkey server.key -passin pass:{self.state.unit_broker.keystore_password} -certfile server.pem -out keystore.p12 -password pass:{self.state.unit_broker.keystore_password}" try: - self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) - self.workload.exec(f"chown {USER}:{GROUP} {self.workload.paths.keystore}") - self.workload.exec(f"chmod 770 {self.workload.paths.keystore}") + self.workload.exec(command=command.split(), working_dir=self.workload.paths.conf_path) + self.workload.exec(["chown", f"{USER}:{GROUP}", f"{self.workload.paths.keystore}"]) + self.workload.exec(["chmod", "770", f"{self.workload.paths.keystore}"]) except (subprocess.CalledProcessError, ExecError) as e: logger.error(e.stdout) raise e @@ -91,7 +91,7 @@ def import_cert(self, alias: str, filename: str) -> None: """Add a certificate to the truststore.""" command = f"{self.keytool} -import -v -alias {alias} -file {filename} -keystore truststore.jks -storepass {self.state.unit_broker.truststore_password} -noprompt" try: - self.workload.exec(command=command, working_dir=self.workload.paths.conf_path) + self.workload.exec(command=command.split(), working_dir=self.workload.paths.conf_path) except (subprocess.CalledProcessError, ExecError) as e: # in case 
this reruns and fails
             if e.stdout and "already exists" in e.stdout:
@@ -104,8 +104,10 @@ def remove_cert(self, alias: str) -> None:
         """Remove a cert from the truststore."""
         try:
             command = f"{self.keytool} -delete -v -alias {alias} -keystore truststore.jks -storepass {self.state.unit_broker.truststore_password} -noprompt"
-            self.workload.exec(command=command, working_dir=self.workload.paths.conf_path)
-            self.workload.exec(f"rm -f {alias}.pem", working_dir=self.workload.paths.conf_path)
+            self.workload.exec(command=command.split(), working_dir=self.workload.paths.conf_path)
+            self.workload.exec(
+                ["rm", "-f", f"{alias}.pem"], working_dir=self.workload.paths.conf_path
+            )
         except (subprocess.CalledProcessError, ExecError) as e:
             if e.stdout and "does not exist" in e.stdout:
                 logger.warning(e.stdout)
@@ -117,7 +119,7 @@ def remove_stores(self) -> None:
         """Cleans up all keys/certs/stores on a unit."""
         try:
             self.workload.exec(
-                command="rm -rf *.pem *.key *.p12 *.jks",
+                command="rm -rf *.pem *.key *.p12 *.jks",  # kept as a str: exec() only uses the shell for str commands, and these globs need shell expansion
                 working_dir=self.workload.paths.conf_path,
             )
         except (subprocess.CalledProcessError, ExecError) as e:
diff --git a/src/workload.py b/src/workload.py
index d2703e67..7463cff1 100644
--- a/src/workload.py
+++ b/src/workload.py
@@ -8,24 +8,27 @@
 import os
 import re
 import subprocess
+from typing import Mapping
 
 from charms.operator_libs_linux.v1 import snap
 from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed
 from typing_extensions import override
 
-from core.workload import WorkloadBase
-from literals import CHARMED_KAFKA_SNAP_REVISION, GROUP, SNAP_NAME, USER
+from core.workload import CharmedKafkaPaths, WorkloadBase
+from literals import BALANCER, BROKER, CHARMED_KAFKA_SNAP_REVISION, GROUP, SNAP_NAME, USER
 
 logger = logging.getLogger(__name__)
 
 
-class KafkaWorkload(WorkloadBase):
+class Workload(WorkloadBase):
     """Wrapper for performing common operations specific to the Kafka Snap."""
 
     # FIXME: Paths and constants integrated into WorkloadBase?
SNAP_NAME = "charmed-kafka" - SNAP_SERVICE = "daemon" - LOG_SLOT = "logs" + LOG_SLOTS = ["kafka-logs", "cc-logs"] + + paths: CharmedKafkaPaths + service: str def __init__(self) -> None: self.kafka = snap.SnapCache()[SNAP_NAME] @@ -33,21 +36,21 @@ def __init__(self) -> None: @override def start(self) -> None: try: - self.kafka.start(services=[self.SNAP_SERVICE]) + self.kafka.start(services=[self.service]) except snap.SnapError as e: logger.exception(str(e)) @override def stop(self) -> None: try: - self.kafka.stop(services=[self.SNAP_SERVICE]) + self.kafka.stop(services=[self.service]) except snap.SnapError as e: logger.exception(str(e)) @override def restart(self) -> None: try: - self.kafka.restart(services=[self.SNAP_SERVICE]) + self.kafka.restart(services=[self.service]) except snap.SnapError as e: logger.exception(str(e)) @@ -67,25 +70,28 @@ def write(self, content: str, path: str, mode: str = "w") -> None: with open(path, mode) as f: f.write(content) - self.exec(f"chown -R {USER}:{GROUP} {path}") + self.exec(["chown", "-R", f"{USER}:{GROUP}", f"{path}"]) @override def exec( - self, command: str, env: dict[str, str] | None = None, working_dir: str | None = None + self, + command: list[str] | str, + env: Mapping[str, str] | None = None, + working_dir: str | None = None, ) -> str: try: output = subprocess.check_output( command, stderr=subprocess.PIPE, universal_newlines=True, - shell=True, + shell=isinstance(command, str), env=env, cwd=working_dir, ) logger.debug(f"{output=}") return output except subprocess.CalledProcessError as e: - logger.debug(f"cmd failed - cmd={e.cmd}, stdout={e.stdout}, stderr={e.stderr}") + logger.error(f"cmd failed - cmd={e.cmd}, stdout={e.stdout}, stderr={e.stderr}") raise e @override @@ -97,17 +103,10 @@ def exec( ) def active(self) -> bool: try: - return bool(self.kafka.services[self.SNAP_SERVICE]["active"]) + return bool(self.kafka.services[self.service]["active"]) except KeyError: return False - @override - def run_bin_command(self, bin_keyword: str, bin_args: list[str], opts: list[str] = []) -> str: - opts_str = " ".join(opts) - bin_str = " ".join(bin_args) - command = f"{opts_str} {SNAP_NAME}.{bin_keyword} {bin_str}" - return self.exec(command) - def install(self) -> bool: """Loads the Kafka snap from LP. 
@@ -120,7 +119,7 @@ def install(self) -> bool: self.kafka.hold() return True - except (snap.SnapError) as e: + except snap.SnapError as e: logger.error(str(e)) return False @@ -153,14 +152,34 @@ def get_service_pid(self) -> int: with open(f"/proc/{pid}/cgroup", "r") as fid: content = "".join(fid.readlines()) - if f"{self.SNAP_NAME}.{self.SNAP_SERVICE}" in content: + if f"{self.SNAP_NAME}.{self.service}" in content: logger.debug( - f"Found Snap service {self.SNAP_SERVICE} for {self.SNAP_NAME} with PID {pid}" + f"Found Snap service {self.service} for {self.SNAP_NAME} with PID {pid}" ) return int(pid) raise snap.SnapError(f"Snap {self.SNAP_NAME} pid not found") + @override + def run_bin_command( + self, bin_keyword: str, bin_args: list[str], opts: list[str] | None = None + ) -> str: + if opts is None: + opts = [] + opts_str = " ".join(opts) + bin_str = " ".join(bin_args) + command = f"{opts_str} {SNAP_NAME}.{bin_keyword} {bin_str}" + return self.exec(command) + + +class KafkaWorkload(Workload): + """Broker specific wrapper.""" + + def __init__(self) -> None: + super().__init__() + self.paths = CharmedKafkaPaths(BROKER) + self.service = BROKER.service + @override def get_version(self) -> str: if not self.active: @@ -170,3 +189,16 @@ def get_version(self) -> str: except: # noqa: E722 version = "" return version + + +class BalancerWorkload(Workload): + """Balancer specific wrapper.""" + + def __init__(self) -> None: + super().__init__() + self.paths = CharmedKafkaPaths(BALANCER) + self.service = BALANCER.service + + @override + def get_version(self) -> str: + raise NotImplementedError diff --git a/tests/integration/ha/ha_helpers.py b/tests/integration/ha/ha_helpers.py index 8224369c..0dd53df9 100644 --- a/tests/integration/ha/ha_helpers.py +++ b/tests/integration/ha/ha_helpers.py @@ -59,7 +59,7 @@ async def get_topic_description( unit_name = unit_name or ops_test.model.applications[APP_NAME].units[0].name output = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.topics --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['CONF']}/client.properties --describe --topic {topic}'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.topics --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['kafka']['CONF']}/client.properties --describe --topic {topic}'", stderr=PIPE, shell=True, universal_newlines=True, @@ -91,7 +91,7 @@ async def get_topic_offsets( # example of topic offset output: 'test-topic:0:10' result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.get-offsets --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['CONF']}/client.properties --topic {topic}'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh {unit_name} sudo -i 'charmed-kafka.get-offsets --bootstrap-server {','.join(bootstrap_servers)} --command-config {PATHS['kafka']['CONF']}/client.properties --topic {topic}'", stderr=PIPE, shell=True, universal_newlines=True, diff --git a/tests/integration/ha/test_ha.py b/tests/integration/ha/test_ha.py index 10bb8c54..26bc30db 100644 --- a/tests/integration/ha/test_ha.py +++ b/tests/integration/ha/test_ha.py @@ -40,6 +40,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + @pytest.fixture() async def c_writes(ops_test: OpsTest): diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 1f3bc39f..368d085d 100644 --- a/tests/integration/helpers.py +++ 
b/tests/integration/helpers.py @@ -6,6 +6,7 @@ import socket import subprocess from contextlib import closing +from json.decoder import JSONDecodeError from pathlib import Path from subprocess import PIPE, check_output from typing import Any, Dict, List, Optional, Set @@ -16,8 +17,13 @@ from kafka.admin import NewTopic from kazoo.exceptions import AuthFailedError, NoNodeError from pytest_operator.plugin import OpsTest +from tenacity import retry +from tenacity.retry import retry_if_result +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_fixed -from literals import PATHS, SECURITY_PROTOCOL_PORTS +from core.models import JSON +from literals import BALANCER_WEBSERVER_USER, JMX_CC_PORT, PATHS, PEER, SECURITY_PROTOCOL_PORTS from managers.auth import Acl, AuthManager METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) @@ -244,7 +250,7 @@ def check_logs(ops_test: OpsTest, kafka_unit_name: str, topic: str) -> None: topic: the desired topic to check """ logs = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh {kafka_unit_name} sudo -i 'find {PATHS['DATA']}/data'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh {kafka_unit_name} sudo -i 'find {PATHS['kafka']['DATA']}/data'", stderr=PIPE, shell=True, universal_newlines=True, @@ -266,7 +272,7 @@ async def run_client_properties(ops_test: OpsTest) -> str: + f":{SECURITY_PROTOCOL_PORTS['SASL_PLAINTEXT', 'SCRAM-SHA-512'].client}" ) result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'charmed-kafka.configs --bootstrap-server {bootstrap_server} --describe --all --command-config {PATHS['CONF']}/client.properties --entity-type users'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'charmed-kafka.configs --bootstrap-server {bootstrap_server} --describe --all --command-config {PATHS['kafka']['CONF']}/client.properties --entity-type users'", stderr=PIPE, shell=True, universal_newlines=True, @@ -278,7 +284,7 @@ async def run_client_properties(ops_test: OpsTest) -> str: async def set_mtls_client_acls(ops_test: OpsTest, bootstrap_server: str) -> str: """Adds ACLs for principal `User:client` and `TEST-TOPIC`.""" result = check_output( - f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'sudo charmed-kafka.acls --bootstrap-server {bootstrap_server} --add --allow-principal=User:client --operation READ --operation WRITE --operation CREATE --topic TEST-TOPIC --command-config {PATHS['CONF']}/client.properties'", + f"JUJU_MODEL={ops_test.model_full_name} juju ssh kafka/0 sudo -i 'sudo charmed-kafka.acls --bootstrap-server {bootstrap_server} --add --allow-principal=User:client --operation READ --operation WRITE --operation CREATE --topic TEST-TOPIC --command-config {PATHS['kafka']['CONF']}/client.properties'", stderr=PIPE, shell=True, universal_newlines=True, @@ -443,3 +449,113 @@ async def get_address(ops_test: OpsTest, app_name=APP_NAME, unit_num=0) -> str: status = await ops_test.model.get_status() # noqa: F821 address = status["applications"][app_name]["units"][f"{app_name}/{unit_num}"]["public-address"] return address + + +def balancer_exporter_is_up(model_full_name: str | None, app_name: str) -> bool: + check_output( + f"JUJU_MODEL={model_full_name} juju ssh {app_name}/leader sudo -i 'curl http://localhost:{JMX_CC_PORT}/metrics'", + stderr=PIPE, + shell=True, + universal_newlines=True, + ) + return True + + +def balancer_is_running(model_full_name: str | None, app_name: str) -> bool: + check_output( + f"JUJU_MODEL={model_full_name} juju 
ssh {app_name}/leader sudo -i 'curl http://localhost:9090/kafkacruisecontrol/state'",
+        stderr=PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
+    return True
+
+
+def balancer_is_secure(ops_test: OpsTest, app_name: str) -> bool:
+    model_full_name = ops_test.model_full_name
+    err_401 = "Error 401 Unauthorized"
+    unauthorized_ok = err_401 in check_output(
+        f"JUJU_MODEL={model_full_name} juju ssh {app_name}/leader sudo -i 'curl http://localhost:9090/kafkacruisecontrol/state'",
+        stderr=PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
+
+    pwd = get_secret_by_label(ops_test=ops_test, label=f"{PEER}.{app_name}.app", owner=app_name)[
+        "balancer-password"
+    ]
+    authorized_ok = err_401 not in check_output(
+        f"JUJU_MODEL={model_full_name} juju ssh {app_name}/leader sudo -i 'curl http://localhost:9090/kafkacruisecontrol/state'"
+        f" -u {BALANCER_WEBSERVER_USER}:{pwd}",
+        stderr=PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
+    return all((unauthorized_ok, authorized_ok))
+
+
+@retry(
+    wait=wait_fixed(20),  # long enough to not overwhelm the API
+    stop=stop_after_attempt(180),  # give it 60 minutes to load
+    retry=retry_if_result(lambda result: result is False),
+    retry_error_callback=lambda _: False,
+)
+def balancer_is_ready(ops_test: OpsTest, app_name: str) -> bool:
+    pwd = get_secret_by_label(ops_test=ops_test, label=f"{PEER}.{app_name}.app", owner=app_name)[
+        "balancer-password"
+    ]
+    monitor_state = check_output(
+        f"JUJU_MODEL={ops_test.model_full_name} juju ssh {app_name}/leader sudo -i 'curl http://localhost:9090/kafkacruisecontrol/state?json=True'"
+        f" -u {BALANCER_WEBSERVER_USER}:{pwd}",
+        stderr=PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
+
+    try:
+        monitor_state_json = json.loads(monitor_state).get("MonitorState", {})
+    except JSONDecodeError as e:
+        logger.error(e)
+        return False
+
+    logger.info(f"{monitor_state_json=}")
+
+    return all(
+        [
+            monitor_state_json.get("numMonitoredWindows", 0),
+            monitor_state_json.get("numValidPartitions", 0),
+        ]
+    )
+
+
+@retry(
+    wait=wait_fixed(20),  # long enough to not overwhelm the API
+    stop=stop_after_attempt(6),
+    reraise=True,
+)
+def get_kafka_broker_state(ops_test: OpsTest, app_name: str) -> JSON:
+    pwd = get_secret_by_label(ops_test=ops_test, label=f"{PEER}.{app_name}.app", owner=app_name)[
+        "balancer-password"
+    ]
+    broker_state = check_output(
+        f"JUJU_MODEL={ops_test.model_full_name} juju ssh {app_name}/leader sudo -i 'curl http://localhost:9090/kafkacruisecontrol/kafka_cluster_state?json=True'"
+        f" -u {BALANCER_WEBSERVER_USER}:{pwd}",
+        stderr=PIPE,
+        shell=True,
+        universal_newlines=True,
+    )
+
+    try:
+        broker_state_json = json.loads(broker_state).get("KafkaBrokerState", {})
+    except JSONDecodeError as e:
+        logger.error(e)
+        return {}  # keep the declared JSON return shape so callers can still call .get()
+
+    logger.info(f"{broker_state_json=}")
+
+    return broker_state_json
+
+
+def get_replica_count_by_broker_id(ops_test: OpsTest, app_name: str) -> dict[str, Any]:
+    broker_state_json = get_kafka_broker_state(ops_test, app_name)
+    return broker_state_json.get("ReplicaCountByBrokerId", {})
diff --git a/tests/integration/test_balancer.py b/tests/integration/test_balancer.py
new file mode 100644
index 00000000..f3584af1
--- /dev/null
+++ b/tests/integration/test_balancer.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python3
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
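+
+# These tests are parametrised over both deployment shapes: the balancer co-located with
+# the broker (roles="broker,balancer" on the kafka app) and as a separate "balancer" app
+# related over the peer-cluster interfaces.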
+ +import asyncio +import logging +from subprocess import CalledProcessError + +import pytest +from pytest_operator.plugin import OpsTest + +from literals import PEER_CLUSTER_ORCHESTRATOR_RELATION, PEER_CLUSTER_RELATION, TLS_RELATION + +from .helpers import ( + APP_NAME, + ZK_NAME, + balancer_exporter_is_up, + balancer_is_ready, + balancer_is_running, + balancer_is_secure, + get_replica_count_by_broker_id, +) + +logger = logging.getLogger(__name__) + +pytestmark = pytest.mark.balancer + +BALANCER_APP = "balancer" +PRODUCER_APP = "producer" +TLS_NAME = "self-signed-certificates" + + +@pytest.fixture(params=[APP_NAME, BALANCER_APP], scope="module") +async def balancer_app(ops_test: OpsTest, request): + yield request.param + + +class TestBalancer: + @pytest.mark.abort_on_fail + async def test_build_and_deploy(self, ops_test: OpsTest, kafka_charm, balancer_app): + await ops_test.model.add_machine(series="jammy") + machine_ids = await ops_test.model.get_machines() + + await asyncio.gather( + ops_test.model.deploy( + kafka_charm, + application_name=APP_NAME, + num_units=1, + series="jammy", + to=machine_ids[0], + config={"roles": "broker,balancer" if balancer_app == APP_NAME else "broker"}, + ), + ops_test.model.deploy( + ZK_NAME, channel="edge", application_name=ZK_NAME, num_units=1, series="jammy" + ), + ops_test.model.deploy( + "kafka-test-app", + application_name=PRODUCER_APP, + channel="edge", + num_units=1, + series="jammy", + config={ + "topic_name": "HOT-TOPIC", + "num_messages": 100000, + "role": "producer", + "partitions": 100, + "replication_factor": "3", + }, + ), + ) + + if balancer_app != APP_NAME: + await ops_test.model.deploy( + kafka_charm, + application_name=balancer_app, + num_units=1, + series="jammy", + config={"roles": balancer_app}, + ) + + await ops_test.model.wait_for_idle( + apps=list({APP_NAME, ZK_NAME, balancer_app}), idle_period=30, timeout=3600 + ) + assert ops_test.model.applications[APP_NAME].status == "blocked" + assert ops_test.model.applications[ZK_NAME].status == "active" + assert ops_test.model.applications[balancer_app].status == "blocked" + + @pytest.mark.abort_on_fail + async def test_relate_not_enough_brokers(self, ops_test: OpsTest, balancer_app): + await ops_test.model.add_relation(APP_NAME, ZK_NAME) + await ops_test.model.add_relation(PRODUCER_APP, APP_NAME) + if balancer_app != APP_NAME: + await ops_test.model.add_relation( + f"{APP_NAME}:{PEER_CLUSTER_RELATION}", + f"{BALANCER_APP}:{PEER_CLUSTER_ORCHESTRATOR_RELATION}", + ) + + await ops_test.model.wait_for_idle( + apps=list({APP_NAME, ZK_NAME, balancer_app}), idle_period=30 + ) + + async with ops_test.fast_forward(fast_interval="20s"): + await asyncio.sleep(60) # ensure update-status adds broker-capacities if missed + + assert ops_test.model.applications[balancer_app].status == "waiting" + + with pytest.raises(CalledProcessError): + assert balancer_is_running( + model_full_name=ops_test.model_full_name, app_name=balancer_app + ) + + @pytest.mark.abort_on_fail + async def test_minimum_brokers_balancer_starts(self, ops_test: OpsTest, balancer_app): + await ops_test.model.applications[APP_NAME].add_units(count=2) + await ops_test.model.block_until( + lambda: len(ops_test.model.applications[APP_NAME].units) == 3 + ) + await ops_test.model.wait_for_idle( + apps=list({APP_NAME, ZK_NAME, balancer_app, PRODUCER_APP}), + status="active", + timeout=1800, + idle_period=30, + ) + + assert balancer_is_running(model_full_name=ops_test.model_full_name, app_name=balancer_app) + assert balancer_is_secure(ops_test, 
app_name=balancer_app) + + async def test_balancer_exporter_endpoints(self, ops_test: OpsTest, balancer_app): + assert balancer_exporter_is_up(ops_test.model_full_name, balancer_app) + + @pytest.mark.abort_on_fail + async def test_balancer_monitor_state(self, ops_test: OpsTest, balancer_app): + assert balancer_is_ready(ops_test=ops_test, app_name=balancer_app) + + @pytest.mark.abort_on_fail + async def test_add_unit_full_rebalance(self, ops_test: OpsTest, balancer_app): + await ops_test.model.applications[APP_NAME].add_units( + count=1 # up to 4, new unit won't have any partitions + ) + await ops_test.model.block_until( + lambda: len(ops_test.model.applications[APP_NAME].units) == 4 + ) + await ops_test.model.wait_for_idle( + apps=list({APP_NAME, ZK_NAME, PRODUCER_APP, balancer_app}), + status="active", + timeout=1800, + idle_period=30, + ) + async with ops_test.fast_forward(fast_interval="20s"): + await asyncio.sleep(120) # ensure update-status adds broker-capacities if missed + + assert balancer_is_ready(ops_test=ops_test, app_name=balancer_app) + + await asyncio.sleep(30) # let the API breathe after so many requests + + # verify CC can find the new broker_id 3, with no replica partitions allocated + broker_replica_count = get_replica_count_by_broker_id(ops_test, balancer_app) + new_broker_id = max(map(int, broker_replica_count.keys())) + new_broker_replica_count = int(broker_replica_count.get(str(new_broker_id), 0)) + + assert not new_broker_replica_count + + for unit in ops_test.model.applications[balancer_app].units: + if await unit.is_leader_from_status(): + leader_unit = unit + + rebalance_action_dry_run = await leader_unit.run_action( + "rebalance", mode="full", dryrun=True, timeout=600, block=True + ) + response = await rebalance_action_dry_run.wait() + assert response.results + + rebalance_action = await leader_unit.run_action( + "rebalance", mode="full", dryrun=False, timeout=600, block=True + ) + response = await rebalance_action.wait() + assert response.results + + assert int( + get_replica_count_by_broker_id(ops_test, balancer_app).get(str(new_broker_id), 0) + ) # replicas were successfully moved + + @pytest.mark.abort_on_fail + async def test_remove_unit_full_rebalance(self, ops_test: OpsTest, balancer_app): + # verify CC can find the new broker_id 3, with no replica partitions allocated + broker_replica_count = get_replica_count_by_broker_id(ops_test, balancer_app) + new_broker_id = max(map(int, broker_replica_count.keys())) + + # storing the current replica counts of 0, 1, 2 - they will persist + pre_rebalance_replica_counts = { + key: value + for key, value in get_replica_count_by_broker_id(ops_test, balancer_app).items() + if key != str(new_broker_id) + } + + # removing broker ungracefully + await ops_test.model.applications[APP_NAME].destroy_units(f"{APP_NAME}/{new_broker_id}") + await ops_test.model.block_until( + lambda: len(ops_test.model.applications[APP_NAME].units) == 3 + ) + await ops_test.model.wait_for_idle( + apps=list({APP_NAME, ZK_NAME, PRODUCER_APP, balancer_app}), + status="active", + timeout=1800, + idle_period=30, + ) + async with ops_test.fast_forward(fast_interval="20s"): + await asyncio.sleep(180) # ensure update-status adds broker-capacities if missed + + assert balancer_is_ready(ops_test=ops_test, app_name=balancer_app) + + await asyncio.sleep(10) # let the API breathe after so many requests + + for unit in ops_test.model.applications[balancer_app].units: + if await unit.is_leader_from_status(): + leader_unit = unit + + rebalance_action_dry_run = 
await leader_unit.run_action(
+            "rebalance", mode="full", dryrun=True, timeout=600, block=True
+        )
+        response = await rebalance_action_dry_run.wait()
+        assert response.results
+
+        rebalance_action = await leader_unit.run_action(
+            "rebalance", mode="full", dryrun=False, timeout=600, block=True
+        )
+        response = await rebalance_action.wait()
+        assert response.results
+
+        post_rebalance_replica_counts = get_replica_count_by_broker_id(ops_test, balancer_app)
+
+        assert not int(post_rebalance_replica_counts.get(str(new_broker_id), 0))
+
+        # looping over all brokerids, as rebalance *should* be even across all
+        for key, value in pre_rebalance_replica_counts.items():
+            # verify that post-rebalance, surviving units increased replica counts
+            assert int(value) < int(post_rebalance_replica_counts.get(key, 0))
+
+    async def test_tls(self, ops_test: OpsTest, balancer_app):
+        # deploy and integrate tls
+        tls_config = {"ca-common-name": "kafka"}
+
+        await ops_test.model.deploy(TLS_NAME, channel="edge", config=tls_config, series="jammy")
+        await ops_test.model.wait_for_idle(apps=[TLS_NAME], idle_period=15, timeout=1800)
+        assert ops_test.model.applications[TLS_NAME].status == "active"
+
+        await ops_test.model.add_relation(TLS_NAME, ZK_NAME)
+        await ops_test.model.add_relation(TLS_NAME, f"{APP_NAME}:{TLS_RELATION}")
+
+        if balancer_app != APP_NAME:
+            await ops_test.model.add_relation(TLS_NAME, f"{BALANCER_APP}:{TLS_RELATION}")
+
+        await ops_test.model.wait_for_idle(
+            apps=list({APP_NAME, ZK_NAME, balancer_app}), idle_period=30
+        )
+        async with ops_test.fast_forward(fast_interval="20s"):
+            await asyncio.sleep(60)  # ensure update-status adds broker-capacities if missed
+
+        # Assert that balancer is running and using certificates
+        assert balancer_is_running(model_full_name=ops_test.model_full_name, app_name=balancer_app)
+
+    @pytest.mark.abort_on_fail
+    async def test_add_unit_targeted_rebalance(self, ops_test: OpsTest, balancer_app):
+        await ops_test.model.applications[APP_NAME].add_units(
+            count=1  # up to 4, new unit won't have any partitions
+        )
+        await ops_test.model.block_until(
+            lambda: len(ops_test.model.applications[APP_NAME].units) == 4
+        )
+        await ops_test.model.wait_for_idle(
+            apps=list({APP_NAME, ZK_NAME, PRODUCER_APP, balancer_app}),
+            status="active",
+            timeout=1800,
+            idle_period=30,
+        )
+        async with ops_test.fast_forward(fast_interval="20s"):
+            await asyncio.sleep(120)  # ensure update-status adds broker-capacities if missed
+
+        assert balancer_is_ready(ops_test=ops_test, app_name=balancer_app)
+
+        await asyncio.sleep(30)  # let the API breathe after so many requests
+
+        # verify CC can find the new broker_id 3, with no replica partitions allocated
+        broker_replica_count = get_replica_count_by_broker_id(ops_test, balancer_app)
+        new_broker_id = max(map(int, broker_replica_count.keys()))
+        new_broker_replica_count = int(broker_replica_count.get(str(new_broker_id), 0))
+
+        assert not new_broker_replica_count
+
+        for unit in ops_test.model.applications[balancer_app].units:
+            if await unit.is_leader_from_status():
+                leader_unit = unit
+
+        rebalance_action_dry_run = await leader_unit.run_action(
+            "rebalance", mode="add", brokerid=new_broker_id, dryrun=True, timeout=600, block=True
+        )
+        response = await rebalance_action_dry_run.wait()
+        assert response.results
+
+        rebalance_action = await leader_unit.run_action(
+            "rebalance",
+            mode="add",
+            brokerid=new_broker_id,
+            dryrun=False,
+            timeout=600,
+            block=True,
+        )
+        response = await rebalance_action.wait()
+        assert response.results
+
assert int(
+            get_replica_count_by_broker_id(ops_test, balancer_app).get(str(new_broker_id), 0)
+        )  # replicas were successfully moved
+
+    @pytest.mark.abort_on_fail
+    async def test_balancer_prepare_unit_removal(self, ops_test: OpsTest, balancer_app):
+        broker_replica_count = get_replica_count_by_broker_id(ops_test, balancer_app)
+        new_broker_id = max(map(int, broker_replica_count.keys()))
+
+        for unit in ops_test.model.applications[balancer_app].units:
+            if await unit.is_leader_from_status():
+                leader_unit = unit
+
+        rebalance_action_dry_run = await leader_unit.run_action(
+            "rebalance",
+            mode="remove",
+            brokerid=new_broker_id,
+            dryrun=True,
+            timeout=600,
+            block=True,
+        )
+        response = await rebalance_action_dry_run.wait()
+        assert response.results
+
+        rebalance_action = await leader_unit.run_action(
+            "rebalance",
+            mode="remove",
+            brokerid=new_broker_id,
+            dryrun=False,
+            timeout=600,
+            block=True,
+        )
+        response = await rebalance_action.wait()
+        assert response.results
+
+        broker_replica_count = get_replica_count_by_broker_id(ops_test, balancer_app)
+        new_broker_replica_count = int(broker_replica_count.get(str(new_broker_id), 0))
+
+        assert not new_broker_replica_count
+
+    @pytest.mark.abort_on_fail
+    async def test_cleanup(self, ops_test: OpsTest, balancer_app):
+        for app in list({APP_NAME, ZK_NAME, balancer_app, PRODUCER_APP}):
+            await ops_test.model.remove_application(
+                app, block_until_done=True, force=True, no_wait=True, destroy_storage=True
+            )
diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py
index f070ace7..7c3df808 100644
--- a/tests/integration/test_charm.py
+++ b/tests/integration/test_charm.py
@@ -28,6 +28,8 @@
 
 logger = logging.getLogger(__name__)
 
+pytestmark = pytest.mark.broker
+
 SAME_ZK = f"{ZK_NAME}-same"
 SAME_KAFKA = f"{APP_NAME}-same"
 
@@ -91,7 +93,7 @@ async def test_build_and_deploy(ops_test: OpsTest, kafka_charm):
             num_units=1,
             series="jammy",
             to=machine_ids[0],
-            storage={"data": {"pool": "test_pool", "size": 10240}},
+            storage={"data": {"pool": "test_pool", "size": 1024}},
         ),
         ops_test.model.deploy(
             ZK_NAME, channel="edge", application_name=ZK_NAME, num_units=1, series="jammy"
diff --git a/tests/integration/test_password_rotation.py b/tests/integration/test_password_rotation.py
index 5cc9e785..382c31c8 100644
--- a/tests/integration/test_password_rotation.py
+++ b/tests/integration/test_password_rotation.py
@@ -12,6 +12,8 @@
 
 logger = logging.getLogger(__name__)
 
+pytestmark = pytest.mark.broker
+
 DUMMY_NAME = "app"
 REL_NAME_ADMIN = "kafka-client-admin"
 
diff --git a/tests/integration/test_provider.py b/tests/integration/test_provider.py
index 0fa3a722..e215b8c5 100644
--- a/tests/integration/test_provider.py
+++ b/tests/integration/test_provider.py
@@ -19,6 +19,8 @@
 
 logger = logging.getLogger(__name__)
 
+pytestmark = pytest.mark.broker
+
 APP_NAME = "kafka"
 ZK = "zookeeper"
 DUMMY_NAME_1 = "app"
@@ -49,10 +51,10 @@ async def test_deploy_charms_relate_active(
     await ops_test.model.add_relation(APP_NAME, ZK)
     await ops_test.model.add_relation(APP_NAME, f"{DUMMY_NAME_1}:{REL_NAME_CONSUMER}")
 
-    async with ops_test.fast_forward(fast_interval="60s"):
-        await ops_test.model.wait_for_idle(
-            apps=[APP_NAME, DUMMY_NAME_1, ZK], idle_period=30, status="active"
-        )
+    # async with ops_test.fast_forward(fast_interval="60s"):
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME, DUMMY_NAME_1, ZK], idle_period=30, status="active"
+    )
 
     usernames.update(get_client_usernames(ops_test))
 
@@ -224,7 +226,10 @@ async def
test_connection_updated_on_tls_enabled(ops_test: OpsTest, app_charm): # deploying tls tls_config = {"ca-common-name": "kafka"} - await ops_test.model.deploy(TLS_NAME, channel="edge", config=tls_config, series="jammy") + # FIXME (certs): Unpin the revision once the charm is fixed + await ops_test.model.deploy( + TLS_NAME, channel="edge", config=tls_config, series="jammy", revision=163 + ) await ops_test.model.wait_for_idle( apps=[TLS_NAME], idle_period=30, timeout=1800, status="active" ) diff --git a/tests/integration/test_scaling.py b/tests/integration/test_scaling.py index 52c9d88f..6bf56032 100644 --- a/tests/integration/test_scaling.py +++ b/tests/integration/test_scaling.py @@ -14,6 +14,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + @pytest.mark.abort_on_fail async def test_kafka_simple_scale_up(ops_test: OpsTest, kafka_charm): diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index 8e14aba2..327dd55b 100644 --- a/tests/integration/test_tls.py +++ b/tests/integration/test_tls.py @@ -34,6 +34,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + TLS_NAME = "self-signed-certificates" CERTS_NAME = "tls-certificates-operator" MTLS_NAME = "mtls" @@ -45,7 +47,10 @@ async def test_deploy_tls(ops_test: OpsTest, kafka_charm): tls_config = {"ca-common-name": "kafka"} await asyncio.gather( - ops_test.model.deploy(TLS_NAME, channel="edge", config=tls_config, series="jammy"), + # FIXME (certs): Unpin the revision once the charm is fixed + ops_test.model.deploy( + TLS_NAME, channel="edge", config=tls_config, series="jammy", revision=163 + ), ops_test.model.deploy(ZK, channel="edge", series="jammy", application_name=ZK), ops_test.model.deploy( kafka_charm, diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index e60cdcb5..19b0777b 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -20,6 +20,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + CHANNEL = "3/stable" diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 62e27dbc..2379b0ca 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1,11 +1,14 @@ #!/usr/bin/env python3 # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
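+# The fixtures below provide a CruiseControlClient and canned CruiseControl API payloads
+# (state, kafka_cluster_state, proposal, user_tasks) from tests/unit/data/ for the
+# balancer manager unit tests.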
+import json from unittest.mock import PropertyMock, patch import pytest from ops import JujuVersion -from src.literals import INTERNAL_USERS, SUBSTRATE + +from literals import INTERNAL_USERS, SUBSTRATE +from managers.balancer import CruiseControlClient @pytest.fixture(scope="module") @@ -73,3 +76,46 @@ def patched_health_machine_configured(): def juju_has_secrets(mocker): """Using Juju3 we should always have secrets available.""" mocker.patch.object(JujuVersion, "has_secrets", new_callable=PropertyMock).return_value = True + + +@pytest.fixture(autouse=True) +def patched_sleep(): + with patch("time.sleep") as patched: + yield patched + + +@pytest.fixture +def client() -> CruiseControlClient: + return CruiseControlClient("Beren", "Luthien") + + +@pytest.fixture(scope="function") +def state() -> dict: + with open("tests/unit/data/state.json") as f: + content = f.read() + + return json.loads(content) + + +@pytest.fixture(scope="function") +def kafka_cluster_state() -> dict: + with open("tests/unit/data/kafka_cluster_state.json") as f: + content = f.read() + + return json.loads(content) + + +@pytest.fixture(scope="function") +def proposal() -> dict: + with open("tests/unit/data/proposal.json") as f: + content = f.read() + + return json.loads(content) + + +@pytest.fixture(scope="function") +def user_tasks() -> dict: + with open("tests/unit/data/user_tasks.json") as f: + content = f.read() + + return json.loads(content) diff --git a/tests/unit/data/kafka_cluster_state.json b/tests/unit/data/kafka_cluster_state.json new file mode 100644 index 00000000..6bfb3aff --- /dev/null +++ b/tests/unit/data/kafka_cluster_state.json @@ -0,0 +1,1459 @@ +{ + "KafkaPartitionState": { + "offline": [], + "urp": [], + "with-offline-replicas": [], + "under-min-isr": [ + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 0, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 1, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 2, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 3, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 4, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 5, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 6, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 7, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 8, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 9, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 10, + 
"min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 11, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 12, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 13, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 14, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 15, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 16, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 17, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 18, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 19, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 20, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 21, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 22, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 23, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 24, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 25, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 26, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 27, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 28, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 29, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 30, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 31, + 
"min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 32, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 33, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 34, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 35, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 36, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 37, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 38, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 39, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 40, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 41, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 42, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 43, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 44, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 45, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 46, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 47, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 48, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 49, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 50, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 51, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 52, + 
"min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 53, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 54, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 55, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 56, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 57, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 58, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 59, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 60, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 61, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 62, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 63, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 64, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 65, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 66, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 67, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 68, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 69, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 70, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 71, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 72, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 73, + 
"min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 74, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 75, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 76, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 77, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 78, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 79, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 80, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 81, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 82, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 83, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 84, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 85, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 86, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 87, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 88, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 89, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 90, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 91, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 92, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 93, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 94, + 
"min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 95, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 96, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 0 + ], + "leader": 0, + "out-of-sync": [], + "offline": [], + "partition": 97, + "min-isr": 2, + "replicas": [ + 0 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 2 + ], + "leader": 2, + "out-of-sync": [], + "offline": [], + "partition": 98, + "min-isr": 2, + "replicas": [ + 2 + ], + "topic": "HOT-TOPIC" + }, + { + "in-sync": [ + 1 + ], + "leader": 1, + "out-of-sync": [], + "offline": [], + "partition": 99, + "min-isr": 2, + "replicas": [ + 1 + ], + "topic": "HOT-TOPIC" + } + ] + }, + "KafkaBrokerState": { + "IsController": { + "0": false, + "1": false, + "2": true + }, + "OfflineReplicaCountByBrokerId": {}, + "ReplicaCountByBrokerId": { + "0": 93, + "1": 93, + "2": 92 + }, + "OfflineLogDirsByBrokerId": { + "0": [], + "1": [], + "2": [] + }, + "BrokerSetByBrokerId": {}, + "OnlineLogDirsByBrokerId": { + "0": [ + "/var/snap/charmed-kafka/common/var/lib/kafka/data/0" + ], + "1": [ + "/var/snap/charmed-kafka/common/var/lib/kafka/data/4" + ], + "2": [ + "/var/snap/charmed-kafka/common/var/lib/kafka/data/5" + ] + }, + "LeaderCountByBrokerId": { + "0": 56, + "1": 58, + "2": 56 + }, + "OutOfSyncCountByBrokerId": {}, + "Summary": { + "StdLeadersPerBroker": 0.9428090415820634, + "Leaders": 170, + "MaxLeadersPerBroker": 58, + "Topics": 5, + "MaxReplicasPerBroker": 93, + "StdReplicasPerBroker": 0.4714045207910317, + "Brokers": 3, + "AvgReplicationFactor": 1.6352941176470588, + "AvgLeadersPerBroker": 56.666666666666664, + "Replicas": 278, + "AvgReplicasPerBroker": 92.66666666666667 + } + }, + "version": 1 +} diff --git a/tests/unit/data/proposal.json b/tests/unit/data/proposal.json new file mode 100644 index 00000000..bc6705f4 --- /dev/null +++ b/tests/unit/data/proposal.json @@ -0,0 +1,1023 @@ +{ + "summary":{ + "numIntraBrokerReplicaMovements":0, + "numReplicaMovements":75, + "onDemandBalancednessScoreAfter":74.59480174018387, + "intraBrokerDataToMoveMB":0, + "monitoredPartitionsPercentage":100.0, + "provisionRecommendation":"", + "excludedBrokersForReplicaMove":[ + + ], + "excludedBrokersForLeadership":[ + + ], + "provisionStatus":"RIGHT_SIZED", + "onDemandBalancednessScoreBefore":53.66571290053624, + "recentWindows":5, + "dataToMoveMB":0, + "excludedTopics":[ + + ], + "numLeaderMovements":3 + }, + "goalSummary":[ + { + "goal":"ReplicaCapacityGoal", + "status":"FIXED", + "optimizationTimeMs":2, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + 
"potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"DiskCapacityGoal", + "status":"NO-ACTION", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"NetworkInboundCapacityGoal", + "status":"NO-ACTION", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"NetworkOutboundCapacityGoal", + "status":"NO-ACTION", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + 
"potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"CpuCapacityGoal", + "status":"NO-ACTION", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"ReplicaDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"PotentialNwOutGoal", + "status":"NO-ACTION", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + 
"leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"DiskUsageDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"NetworkInboundUsageDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":24.68805379125702, + "cpu":0.43339327539800043, + "networkOutbound":0.20847283280263942, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.2926936447620392, + "networkOutbound":0.0, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":62, + "cpu":1.3442926406860352, + "networkOutbound":0.5833887308835983, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"NetworkOutboundUsageDistributionGoal", + "status":"FIXED", + "optimizationTimeMs":2, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, 
+ "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":9.137833441248533, + "cpu":0.17645117642181293, + "networkOutbound":0.018604534631013317, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.8414653539657593, + "networkOutbound":0.2864152491092682, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":53, + "cpu":1.2517004013061523, + "networkOutbound":0.3319459483027458, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"CpuUsageDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":2, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":5.0990195135927845, + "cpu":0.10591180343795259, + "networkOutbound":0.019098907134743884, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.9138083457946777, + "networkOutbound":0.290075920522213, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":48, + "cpu":1.1476210355758667, + "networkOutbound":0.341908797621727, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"LeaderReplicaDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":0, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":5.0990195135927845, + "cpu":0.10591180343795259, + "networkOutbound":0.019098907134743884, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.9138083457946777, + "networkOutbound":0.290075920522213, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":48, + "cpu":1.1476210355758667, + "networkOutbound":0.341908797621727, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + 
} + }, + { + "goal":"LeaderBytesInDistributionGoal", + "status":"VIOLATED", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":5.0990195135927845, + "cpu":0.10591180343795259, + "networkOutbound":0.019098907134743884, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.9138083457946777, + "networkOutbound":0.290075920522213, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":48, + "cpu":1.1476210355758667, + "networkOutbound":0.341908797621727, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"TopicReplicaDistributionGoal", + "status":"NO-ACTION", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":5.0990195135927845, + "cpu":0.10591180343795259, + "networkOutbound":0.019098907134743884, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.9138083457946777, + "networkOutbound":0.290075920522213, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + "leaderReplicas":48, + "cpu":1.1476210355758667, + "networkOutbound":0.341908797621727, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + }, + { + "goal":"PreferredLeaderElectionGoal", + "status":"FIXED", + "optimizationTimeMs":1, + "clusterModelStats":{ + "statistics":{ + "AVG":{ + "disk":6.371053695678711, + "replicas":109.0, + "leaderReplicas":42.0, + "cpu":1.0324535369873047, + "networkOutbound":0.3143334314227104, + "networkInbound":0.3897269666194916, + "topicReplicas":21.8, + "potentialNwOut":0.8383880089968443 + }, + "STD":{ + "disk":0.8835218535564802, + "replicas":19.63415391607186, + "leaderReplicas":5.873670062235365, + "cpu":0.1293263763184144, + "networkOutbound":0.06975035287805278, + "networkInbound":0.04812207548185394, + "topicReplicas":3.9814911142560048, + "potentialNwOut":0.14283563774352817 + }, + "MIN":{ + "disk":4.992422103881836, + "replicas":0, + "leaderReplicas":0, + "cpu":0.883521318435669, + "networkOutbound":0.21523501724004745, + "networkInbound":0.31450945138931274, + "topicReplicas":0, + "potentialNwOut":0.6290189027786255 + }, + "MAX":{ + "disk":7.345244407653809, + "replicas":121, + 
"leaderReplicas":49, + "cpu":1.1736074686050415, + "networkOutbound":0.41151297092437744, + "networkInbound":0.4431391954421997, + "topicReplicas":75, + "potentialNwOut":0.9946807771921158 + } + }, + "metadata":{ + "topics":5, + "brokers":5, + "replicas":436 + } + } + } + ], + "loadAfterOptimization":{ + "brokers":[ + { + "FollowerNwInRate":0.0, + "BrokerState":"DEAD", + "Broker":0, + "NwOutRate":0.0, + "NumCore":0.0, + "Host":"UNKNOWN_HOST-0", + "CpuPct":0.0, + "Replicas":0, + "NetworkInCapacity":0.0, + "Rack":"", + "Leaders":0, + "DiskCapacityMB":0.0, + "DiskMB":0.0, + "PnwOutRate":0.0, + "NetworkOutCapacity":0.0, + "LeaderNwInRate":0.0, + "DiskPct":-1.0 + }, + { + "FollowerNwInRate":0.3265414610505104, + "BrokerState":"ALIVE", + "Broker":1, + "NwOutRate":0.21523501724004745, + "NumCore":8.0, + "Host":"10.58.254.29", + "CpuPct":0.1156330332159996, + "Replicas":120, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.29", + "Leaders":39, + "DiskCapacityMB":1.00476656E8, + "DiskMB":6.882291793823242, + "PnwOutRate":0.9421811997890472, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.09012966603040695, + "DiskPct":6.8496425615749416E-6 + }, + { + "FollowerNwInRate":0.23535411059856415, + "BrokerState":"ALIVE", + "Broker":2, + "NwOutRate":0.30543773621320724, + "NumCore":8.0, + "Host":"10.58.254.95", + "CpuPct":0.14345262944698334, + "Replicas":121, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.95", + "Leaders":46, + "DiskCapacityMB":1.00476656E8, + "DiskMB":6.264252662658691, + "PnwOutRate":0.7876711562275887, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.14923398196697235, + "DiskPct":6.234535375718208E-6 + }, + { + "FollowerNwInRate":0.3185265436768532, + "BrokerState":"ALIVE", + "Broker":3, + "NwOutRate":0.32514800131320953, + "NumCore":8.0, + "Host":"10.58.254.54", + "CpuPct":0.11044016480445862, + "Replicas":120, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.54", + "Leaders":34, + "DiskCapacityMB":1.00476656E8, + "DiskMB":7.345244407653809, + "PnwOutRate":0.9946807771921158, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.12461265176534653, + "DiskPct":7.310398952423146E-6 + }, + { + "FollowerNwInRate":0.10875296592712402, + "BrokerState":"ALIVE", + "Broker":4, + "NwOutRate":0.41151297092437744, + "NumCore":8.0, + "Host":"10.58.254.109", + "CpuPct":0.1467009335756302, + "Replicas":75, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.109", + "Leaders":49, + "DiskCapacityMB":1.00476656E8, + "DiskMB":4.992422103881836, + "PnwOutRate":0.6290189027786255, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.20575648546218872, + "DiskPct":4.968738314581086E-6 + } + ], + "hosts":[ + { + "FollowerNwInRate":0.10875296592712402, + "NwOutRate":0.41151297092437744, + "NumCore":8.0, + "Host":"10.58.254.109", + "CpuPct":0.1467009335756302, + "Replicas":75, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.109", + "Leaders":49, + "DiskCapacityMB":1.00476656E8, + "DiskMB":4.992422103881836, + "PnwOutRate":0.6290189027786255, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.20575648546218872, + "DiskPct":4.968738314581086E-6 + }, + { + "FollowerNwInRate":0.3265414610505104, + "NwOutRate":0.21523501724004745, + "NumCore":8.0, + "Host":"10.58.254.29", + "CpuPct":0.1156330332159996, + "Replicas":120, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.29", + "Leaders":39, + "DiskCapacityMB":1.00476656E8, + "DiskMB":6.882291793823242, + "PnwOutRate":0.9421811997890472, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.09012966603040695, + "DiskPct":6.8496425615749416E-6 + 
}, + { + "FollowerNwInRate":0.3185265436768532, + "NwOutRate":0.32514800131320953, + "NumCore":8.0, + "Host":"10.58.254.54", + "CpuPct":0.11044016480445862, + "Replicas":120, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.54", + "Leaders":34, + "DiskCapacityMB":1.00476656E8, + "DiskMB":7.345244407653809, + "PnwOutRate":0.9946807771921158, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.12461265176534653, + "DiskPct":7.310398952423146E-6 + }, + { + "FollowerNwInRate":0.23535411059856415, + "NwOutRate":0.30543773621320724, + "NumCore":8.0, + "Host":"10.58.254.95", + "CpuPct":0.14345262944698334, + "Replicas":121, + "NetworkInCapacity":50000.0, + "Rack":"10.58.254.95", + "Leaders":46, + "DiskCapacityMB":1.00476656E8, + "DiskMB":6.264252662658691, + "PnwOutRate":0.7876711562275887, + "NetworkOutCapacity":50000.0, + "LeaderNwInRate":0.14923398196697235, + "DiskPct":6.234535375718208E-6 + }, + { + "FollowerNwInRate":0.0, + "NwOutRate":0.0, + "NumCore":0.0, + "Host":"UNKNOWN_HOST-0", + "CpuPct":0.0, + "Replicas":0, + "NetworkInCapacity":0.0, + "Rack":"", + "Leaders":0, + "DiskCapacityMB":0.0, + "DiskMB":0.0, + "PnwOutRate":0.0, + "NetworkOutCapacity":0.0, + "LeaderNwInRate":0.0, + "DiskPct":-1.0 + } + ] + }, + "version":1 +} diff --git a/tests/unit/data/state.json b/tests/unit/data/state.json new file mode 100644 index 00000000..6b4a3570 --- /dev/null +++ b/tests/unit/data/state.json @@ -0,0 +1,77 @@ +{ + "AnalyzerState": { + "isProposalReady": true, + "readyGoals": [ + "NetworkInboundUsageDistributionGoal", + "PreferredLeaderElectionGoal", + "CpuUsageDistributionGoal", + "PotentialNwOutGoal", + "LeaderReplicaDistributionGoal", + "NetworkInboundCapacityGoal", + "LeaderBytesInDistributionGoal", + "DiskCapacityGoal", + "ReplicaDistributionGoal", + "TopicReplicaDistributionGoal", + "NetworkOutboundCapacityGoal", + "CpuCapacityGoal", + "DiskUsageDistributionGoal", + "NetworkOutboundUsageDistributionGoal", + "ReplicaCapacityGoal" + ] + }, + "MonitorState": { + "trainingPct": 20, + "trained": false, + "numFlawedPartitions": 0, + "state": "RUNNING", + "numTotalPartitions": 170, + "numMonitoredWindows": 5, + "monitoringCoveragePct": 100, + "reasonOfLatestPauseOrResume": "N/A", + "numValidPartitions": 170 + }, + "ExecutorState": { + "state": "NO_TASK_IN_PROGRESS" + }, + "AnomalyDetectorState": { + "recentBrokerFailures": [], + "recentGoalViolations": [], + "selfHealingDisabled": [ + "BROKER_FAILURE", + "DISK_FAILURE", + "GOAL_VIOLATION", + "METRIC_ANOMALY", + "TOPIC_ANOMALY", + "MAINTENANCE_EVENT" + ], + "balancednessScore": 100, + "selfHealingEnabled": [], + "recentDiskFailures": [], + "metrics": { + "meanTimeToStartFixMs": 0, + "meanTimeBetweenAnomaliesMs": { + "BROKER_FAILURE": 0, + "MAINTENANCE_EVENT": 0, + "DISK_FAILURE": 0, + "GOAL_VIOLATION": 0, + "TOPIC_ANOMALY": 0, + "METRIC_ANOMALY": 0 + }, + "ongoingAnomalyDurationMs": 0, + "numSelfHealingStarted": 0, + "numSelfHealingFailedToStart": 0 + }, + "recentMetricAnomalies": [], + "recentTopicAnomalies": [], + "selfHealingEnabledRatio": { + "BROKER_FAILURE": 0, + "DISK_FAILURE": 0, + "GOAL_VIOLATION": 0, + "METRIC_ANOMALY": 0, + "TOPIC_ANOMALY": 0, + "MAINTENANCE_EVENT": 0 + }, + "recentMaintenanceEvents": [] + }, + "version": 1 +} diff --git a/tests/unit/data/user_tasks.json b/tests/unit/data/user_tasks.json new file mode 100644 index 00000000..13ab1eb5 --- /dev/null +++ b/tests/unit/data/user_tasks.json @@ -0,0 +1,145 @@ +{ + "userTasks":[ + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET 
/kafkacruisecontrol/state", + "UserTaskId":"023bee96-df11-4c44-b423-bc73989a55e8", + "StartMs":"1721742522494" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"ecb629ba-83a6-4ba7-bfc9-787e1a4264ff", + "StartMs":"1721742523196" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"f41a05b7-ccd1-4bf0-8576-761373a964c6", + "StartMs":"1721742544060" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"49f3e90d-3535-4bef-8e69-4b2799fe532b", + "StartMs":"1721742564865" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"5c4824a3-ca5e-49ac-8660-930a5f250f38", + "StartMs":"1721742585799" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"c2ddae1c-4cf6-4490-b3fb-f8ae514d61c2", + "StartMs":"1721742606494" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"fb004625-dfae-484e-849a-203c70f28b34", + "StartMs":"1721742627259" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"895b0e29-6c09-4224-8e9a-b10d6fd18035", + "StartMs":"1721742648320" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"fb066a1d-8eed-4690-8f01-e88ff8d9f006", + "StartMs":"1721742669185" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"c28640f5-e083-4eb7-9a64-e63c7f83fd3f", + "StartMs":"1721742689975" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"bc6317f0-9f1f-44cd-ad0b-fed85a232b8c", + "StartMs":"1721742710925" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"a39bb820-af6f-4498-9b7a-f76554c6a1e5", + "StartMs":"1721742731742" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"e344f8da-0f85-4141-8266-e07334458e77", + "StartMs":"1721742752592" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"ec60c117-cbc5-4140-81c4-ffa6cac145f5", + "StartMs":"1721742773622" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"a0c78b9e-8f35-4cd6-aee1-7f441e83aff9", + "StartMs":"1721742794617" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"3e48c262-ea56-4a62-8bc7-4f2ba3bbe0aa", + "StartMs":"1721742815461" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"46b475ce-ea82-4547-9f96-2c7b9d9a5797", + "StartMs":"1721742836466" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + 
"RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"e4256bcb-93f7-4290-ab11-804a665bf011", + "StartMs":"1721742857054" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/user_tasks", + "UserTaskId":"5023b239-7128-4456-a5e0-af609ea161d5", + "StartMs":"1721742872009" + }, + { + "Status":"Completed", + "ClientIdentity":"127.0.0.1", + "RequestURL":"GET /kafkacruisecontrol/state?json\u003dTrue", + "UserTaskId":"4b7d463a-2362-4c98-a760-6572ae893b91", + "StartMs":"1721742877874" + } + ], + "version":1 +} diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 980e00b2..3587d39f 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -16,6 +16,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) @@ -63,7 +65,7 @@ def test_parse_acls(): parsed_acls = AuthManager._parse_acls(acls=acls) assert len(parsed_acls) == 5 - assert type(list(parsed_acls)[0]) == Acl + assert type(list(parsed_acls)[0]) is Acl def test_generate_producer_acls(): @@ -99,10 +101,10 @@ def test_generate_consumer_acls(): assert sorted(resource_types) == sorted({"TOPIC", "GROUP"}) -def test_add_user_adds_zk_tls_flag(harness): +def test_add_user_adds_zk_tls_flag(harness: Harness[KafkaCharm]): """Checks zk-tls-config-file flag is called for configs bin command.""" with patch("workload.KafkaWorkload.run_bin_command") as patched_exec: - harness.charm.auth_manager.add_user("samwise", "gamgee", zk_auth=True) + harness.charm.broker.auth_manager.add_user("samwise", "gamgee", zk_auth=True) args = patched_exec.call_args_list[0][1] assert ( diff --git a/tests/unit/test_balancer.py b/tests/unit/test_balancer.py new file mode 100644 index 00000000..ccc6d99e --- /dev/null +++ b/tests/unit/test_balancer.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +from pathlib import Path +from unittest.mock import MagicMock, PropertyMock, patch + +import pytest +import yaml +from ops.testing import Harness + +from charm import KafkaCharm +from literals import BALANCER_TOPICS, CHARM_KEY, CONTAINER, SUBSTRATE +from managers.balancer import CruiseControlClient + +logger = logging.getLogger(__name__) + +pytestmark = pytest.mark.balancer + +CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) +ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) +METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) + + +class MockResponse: + def __init__(self, content, status_code=200): + self.content = content + self.status_code = status_code + + def json(self): + return self.content + + def __dict__(self): + """Dict representation of content.""" + return self.content + + +@pytest.fixture +def harness(): + harness = Harness(KafkaCharm, meta=METADATA) + + if SUBSTRATE == "k8s": + harness.set_can_connect(CONTAINER, True) + + harness.add_relation("restart", CHARM_KEY) + harness._update_config( + { + "log_retention_ms": "-1", + "compression_type": "producer", + "roles": "broker,balancer", + } + ) + harness.set_leader(True) + + harness.begin() + return harness + + +def test_client_get_args(client: CruiseControlClient): + with patch("managers.balancer.requests.get") as patched_get: + client.get("silmaril") + + _, kwargs = patched_get.call_args + + assert kwargs["params"]["json"] + assert kwargs["params"]["json"] == "True" + + assert kwargs["auth"] + assert kwargs["auth"] == ("Beren", "Luthien") + + +def test_client_post_args(client: CruiseControlClient): + with patch("managers.balancer.requests.post") as patched_post: + client.post("silmaril") + + _, kwargs = patched_post.call_args + + assert kwargs["params"]["json"] + assert kwargs["params"]["dryrun"] + assert kwargs["auth"] + + assert kwargs["params"]["json"] == "True" + assert kwargs["params"]["dryrun"] == "False" + assert kwargs["auth"] == ("Beren", "Luthien") + + +def test_client_get_task_status(client: CruiseControlClient, user_tasks: dict): + with patch("managers.balancer.requests.get", return_value=MockResponse(user_tasks)): + assert ( + client.get_task_status(user_task_id="e4256bcb-93f7-4290-ab11-804a665bf011") + == "Completed" + ) + + +def test_client_monitoring(client: CruiseControlClient, state: dict): + with patch("managers.balancer.requests.get", return_value=MockResponse(state)): + assert client.monitoring + + +def test_client_executing(client: CruiseControlClient, state: dict): + with patch("managers.balancer.requests.get", return_value=MockResponse(state)): + assert not client.executing + + +def test_client_ready(client: CruiseControlClient, state: dict): + with patch("managers.balancer.requests.get", return_value=MockResponse(state)): + assert client.ready + + not_ready_state = state + not_ready_state["MonitorState"]["numMonitoredWindows"] = 0 # aka not ready + + with patch("managers.balancer.requests.get", return_value=MockResponse(not_ready_state)): + assert not client.ready + + +def test_balancer_manager_create_internal_topics(harness: Harness[KafkaCharm]): + with ( + patch("core.models.PeerCluster.broker_uris", new_callable=PropertyMock, return_value=""), + patch( + "workload.Workload.run_bin_command", + new_callable=None, + return_value=BALANCER_TOPICS[0], # pretend it exists already + ) as patched_run, + ): + harness.charm.balancer.balancer_manager.create_internal_topics() + + assert ( + len(patched_run.call_args_list) == 5 + ) # checks for 
existence 3 times, creates 2 times + + list_counter = 0 + for args, _ in patched_run.call_args_list: + all_flags = "".join(args[1]) + + if "list" in all_flags: + list_counter += 1 + + # only created needed topics + if "create" in all_flags: + assert any((topic in all_flags) for topic in BALANCER_TOPICS) + assert BALANCER_TOPICS[0] not in all_flags + + assert list_counter == len(BALANCER_TOPICS) # checked for existence of all balancer topics + + +@pytest.mark.parametrize("leader", [True, False]) +@pytest.mark.parametrize("monitoring", [True, False]) +@pytest.mark.parametrize("executing", [True, False]) +@pytest.mark.parametrize("ready", [True, False]) +@pytest.mark.parametrize("status", [200, 404]) +def test_balancer_manager_rebalance_full( + harness: Harness[KafkaCharm], + proposal: dict, + leader: bool, + monitoring: bool, + executing: bool, + ready: bool, + status: int, +): + mock_event = MagicMock() + mock_event.params = {"mode": "full", "dryrun": True} + + with ( + harness.hooks_disabled(), + patch( + "managers.balancer.CruiseControlClient.monitoring", + new_callable=PropertyMock, + return_value=monitoring, + ), + patch( + "managers.balancer.CruiseControlClient.executing", + new_callable=PropertyMock, + return_value=not executing, + ), + patch( + "managers.balancer.CruiseControlClient.ready", + new_callable=PropertyMock, + return_value=ready, + ), + patch( + "managers.balancer.BalancerManager.rebalance", + new_callable=None, + return_value=(MockResponse(content=proposal, status_code=status), "foo"), + ), + patch( + "managers.balancer.BalancerManager.wait_for_task", + new_callable=None, + ) as patched_wait_for_task, + ): + harness.set_leader(leader) + harness.charm.balancer.rebalance(mock_event) + + if not all([leader, monitoring, executing, ready, status == 200]): + assert mock_event._mock_children.get("fail") # event.fail was called + else: + assert patched_wait_for_task.call_count + assert mock_event._mock_children.get("set_results") # event.set_results was called + + +@pytest.mark.parametrize("mode", ["add", "remove"]) +@pytest.mark.parametrize("brokerid", [None, 0]) +def test_rebalance_add_remove_broker_id_length( + harness: Harness[KafkaCharm], proposal: dict, mode: str, brokerid: int | None +): + mock_event = MagicMock() + payload = {"mode": mode, "dryrun": True} + payload = payload | {"brokerid": brokerid} if brokerid is not None else payload + mock_event.params = payload + + with ( + harness.hooks_disabled(), + patch( + "managers.balancer.CruiseControlClient.monitoring", + new_callable=PropertyMock, + return_value=True, + ), + patch( + "managers.balancer.CruiseControlClient.executing", + new_callable=PropertyMock, + return_value=not True, + ), + patch( + "managers.balancer.CruiseControlClient.ready", + new_callable=PropertyMock, + return_value=True, + ), + patch( + "managers.balancer.BalancerManager.rebalance", + new_callable=None, + return_value=(MockResponse(content=proposal, status_code=200), "foo"), + ), + patch( + "managers.balancer.BalancerManager.wait_for_task", + new_callable=None, + ) as patched_wait_for_task, + ): + harness.set_leader(True) + harness.charm.balancer.rebalance(mock_event) + + if brokerid is None: + assert mock_event._mock_children.get("fail") # event.fail was called + else: + assert patched_wait_for_task.call_count + assert mock_event._mock_children.get("set_results") # event.set_results was called + + +def test_rebalance_broker_id_not_found(harness: Harness[KafkaCharm]): + mock_event = MagicMock() + payload = {"mode": "add", "dryrun": True, "brokerid": 
999} + mock_event.params = payload + + with ( + harness.hooks_disabled(), + patch( + "managers.balancer.CruiseControlClient.monitoring", + new_callable=PropertyMock, + return_value=True, + ), + patch( + "managers.balancer.CruiseControlClient.executing", + new_callable=PropertyMock, + return_value=not True, + ), + patch( + "managers.balancer.CruiseControlClient.ready", + new_callable=PropertyMock, + return_value=True, + ), + ): + harness.set_leader(True) + + # When + harness.charm.balancer.rebalance(mock_event) + + # Then + assert mock_event._mock_children.get("fail") # event.fail was called + + +def test_balancer_manager_clean_results(harness: Harness[KafkaCharm], proposal: dict): + cleaned_results = harness.charm.balancer.balancer_manager.clean_results(value=proposal) + + def _check_cleaned_results(value) -> bool: + if isinstance(value, list): + for item in value: + assert not isinstance(item, dict) + + if isinstance(value, dict): + for k, v in value.items(): + assert k.islower() + assert _check_cleaned_results(v) + + return True + + assert _check_cleaned_results(cleaned_results) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 032c55d4..83bf3464 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -33,6 +33,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) @@ -61,7 +63,7 @@ def harness() -> Harness: @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_install_blocks_snap_install_failure(harness: Harness): +def test_install_blocks_snap_install_failure(harness: Harness[KafkaCharm]): """Checks unit goes to BlockedStatus after snap failure on install hook.""" with patch("workload.KafkaWorkload.install", return_value=False): harness.charm.on.install.emit() @@ -76,14 +78,16 @@ def test_install_sets_env_vars(harness: Harness, patched_etc_environment): @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_install_configures_os(harness: Harness, patched_sysctl_config): +def test_install_configures_os(harness: Harness[KafkaCharm], patched_sysctl_config): with patch("workload.KafkaWorkload.install"): harness.charm.on.install.emit() patched_sysctl_config.assert_called_once_with(OS_REQUIREMENTS) @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_install_sets_status_if_os_config_fails(harness: Harness, patched_sysctl_config): +def test_install_sets_status_if_os_config_fails( + harness: Harness[KafkaCharm], patched_sysctl_config +): with patch("workload.KafkaWorkload.install"): patched_sysctl_config.side_effect = ApplyError("Error setting values") harness.charm.on.install.emit() @@ -91,12 +95,12 @@ def test_install_sets_status_if_os_config_fails(harness: Harness, patched_sysctl assert harness.charm.unit.status == Status.SYSCONF_NOT_POSSIBLE.value.status -def test_ready_to_start_maintenance_no_peer_relation(harness: Harness): +def test_ready_to_start_maintenance_no_peer_relation(harness: Harness[KafkaCharm]): harness.charm.on.start.emit() assert harness.charm.unit.status == Status.NO_PEER_RELATION.value.status -def test_ready_to_start_blocks_no_zookeeper_relation(harness: Harness): +def test_ready_to_start_blocks_no_zookeeper_relation(harness: Harness[KafkaCharm]): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) 
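A note on the `pytestmark = pytest.mark.broker` line added above (its counterpart, `pytest.mark.balancer`, is set in the new tests/unit/test_balancer.py): module-level markers let the unit suite be partitioned by charm role, so e.g. `pytest -m balancer` runs only the Cruise Control tests. Custom markers should be registered so pytest does not warn about (or, under `--strict-markers`, reject) them; assuming they are not already declared in the repository's pyproject.toml, a minimal conftest.py hook would be:

    def pytest_configure(config):
        # Register the role markers used to partition the unit suite; the
        # descriptions here are illustrative, not taken from the repository.
        config.addinivalue_line("markers", "broker: unit tests for the broker role")
        config.addinivalue_line(
            "markers", "balancer: unit tests for the balancer (Cruise Control) role"
        )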
@@ -104,7 +108,7 @@ def test_ready_to_start_blocks_no_zookeeper_relation(harness: Harness): assert harness.charm.unit.status == Status.ZK_NOT_RELATED.value.status -def test_ready_to_start_waits_no_zookeeper_data(harness: Harness): +def test_ready_to_start_waits_no_zookeeper_data(harness: Harness[KafkaCharm]): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) harness.add_relation(ZK, ZK) @@ -113,7 +117,7 @@ def test_ready_to_start_waits_no_zookeeper_data(harness: Harness): assert harness.charm.unit.status == Status.ZK_NO_DATA.value.status -def test_ready_to_start_waits_no_user_credentials(harness: Harness, zk_data): +def test_ready_to_start_waits_no_user_credentials(harness: Harness[KafkaCharm], zk_data): with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -123,7 +127,7 @@ def test_ready_to_start_waits_no_user_credentials(harness: Harness, zk_data): assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status -def test_ready_to_start_blocks_mismatch_tls(harness: Harness, zk_data, passwords_data): +def test_ready_to_start_blocks_mismatch_tls(harness: Harness[KafkaCharm], zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -135,7 +139,7 @@ def test_ready_to_start_blocks_mismatch_tls(harness: Harness, zk_data, passwords assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status -def test_ready_to_start_succeeds(harness: Harness, zk_data, passwords_data): +def test_ready_to_start_succeeds(harness: Harness[KafkaCharm], zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -145,7 +149,9 @@ def test_ready_to_start_succeeds(harness: Harness, zk_data, passwords_data): assert harness.charm.state.ready_to_start.value.status == Status.ACTIVE.value.status -def test_healthy_fails_if_not_ready_to_start(harness: Harness, zk_data, passwords_data): +def test_healthy_fails_if_not_ready_to_start( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -153,10 +159,10 @@ def test_healthy_fails_if_not_ready_to_start(harness: Harness, zk_data, password harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) harness.update_relation_data(peer_rel_id, CHARM_KEY, {"tls": "enabled"}) - assert not harness.charm.healthy + assert not harness.charm.broker.healthy -def test_healthy_fails_if_snap_not_active(harness: Harness, zk_data, passwords_data): +def test_healthy_fails_if_snap_not_active(harness: Harness[KafkaCharm], zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -164,15 +170,15 @@ def test_healthy_fails_if_snap_not_active(harness: Harness, zk_data, passwords_d harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with patch("workload.KafkaWorkload.active", return_value=False) as patched_snap_active: - assert not harness.charm.healthy + assert not harness.charm.broker.healthy assert patched_snap_active.call_count == 1 if SUBSTRATE == "vm": - assert harness.charm.unit.status == Status.SNAP_NOT_RUNNING.value.status + assert harness.charm.unit.status == Status.BROKER_NOT_RUNNING.value.status elif SUBSTRATE == "k8s": assert harness.charm.unit.status == 
Status.SERVICE_NOT_RUNNING.value.status -def test_healthy_succeeds(harness: Harness, zk_data, passwords_data): +def test_healthy_succeeds(harness: Harness[KafkaCharm], zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -180,10 +186,10 @@ def test_healthy_succeeds(harness: Harness, zk_data, passwords_data): harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) with patch("workload.KafkaWorkload.active", return_value=True): - assert harness.charm.healthy + assert harness.charm.broker.healthy -def test_start_defers_without_zookeeper(harness: Harness): +def test_start_defers_without_zookeeper(harness: Harness[KafkaCharm]): """Checks event deferred and not lost without ZK relation on start hook.""" with patch("ops.framework.EventBase.defer") as patched_defer: harness.charm.on.start.emit() @@ -191,7 +197,7 @@ def test_start_defers_without_zookeeper(harness: Harness): patched_defer.assert_called_once() -def test_start_sets_necessary_config(harness: Harness, zk_data, passwords_data): +def test_start_sets_necessary_config(harness: Harness[KafkaCharm], zk_data, passwords_data): """Checks event writes all needed config to unit on start hook.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -217,7 +223,7 @@ def test_start_sets_necessary_config(harness: Harness, zk_data, passwords_data): @pytest.mark.skipif(SUBSTRATE == "vm", reason="pebble layer not used on vm") -def test_start_sets_pebble_layer(harness: Harness, zk_data, passwords_data): +def test_start_sets_pebble_layer(harness: Harness[KafkaCharm], zk_data, passwords_data): """Checks layer is the expected at start.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -262,7 +268,7 @@ def test_start_sets_pebble_layer(harness: Harness, zk_data, passwords_data): assert expected_plan == found_plan -def test_start_does_not_start_if_not_ready(harness: Harness): +def test_start_does_not_start_if_not_ready(harness: Harness[KafkaCharm]): """Checks snap service does not start before ready on start hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) @@ -277,7 +283,7 @@ def test_start_does_not_start_if_not_ready(harness: Harness): patched_defer.assert_called() -def test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness): +def test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness[KafkaCharm]): """Checks snap service does not start if mismatch Kafka+ZK TLS on start hook.""" harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -296,7 +302,7 @@ def test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness): assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status -def test_start_does_not_start_if_leader_has_not_set_creds(harness: Harness): +def test_start_does_not_start_if_leader_has_not_set_creds(harness: Harness[KafkaCharm]): """Checks snap service does not start without inter-broker creds on start hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -313,7 +319,9 @@ def test_start_does_not_start_if_leader_has_not_set_creds(harness: Harness): assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status -def test_update_status_blocks_if_broker_not_active(harness: Harness, zk_data, passwords_data): +def test_update_status_blocks_if_broker_not_active( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with 
harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -331,7 +339,9 @@ def test_update_status_blocks_if_broker_not_active(harness: Harness, zk_data, pa @pytest.mark.skipif(SUBSTRATE == "k8s", reason="machine health checks not used on K8s") -def test_update_status_blocks_if_machine_not_configured(harness: Harness, zk_data, passwords_data): +def test_update_status_blocks_if_machine_not_configured( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -340,16 +350,16 @@ def test_update_status_blocks_if_machine_not_configured(harness: Harness, zk_dat with ( patch("health.KafkaHealth.machine_configured", side_effect=SnapError()), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("core.cluster.ZooKeeper.broker_active", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): harness.charm.on.update_status.emit() - assert harness.charm.unit.status == Status.SNAP_NOT_RUNNING.value.status + assert harness.charm.unit.status == Status.BROKER_NOT_RUNNING.value.status @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_update_status_sets_sysconf_warning(harness: Harness, zk_data, passwords_data): +def test_update_status_sets_sysconf_warning(harness: Harness[KafkaCharm], zk_data, passwords_data): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -367,7 +377,7 @@ def test_update_status_sets_sysconf_warning(harness: Harness, zk_data, passwords def test_update_status_sets_active( - harness: Harness, zk_data, passwords_data, patched_health_machine_configured + harness: Harness[KafkaCharm], zk_data, passwords_data, patched_health_machine_configured ): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -385,7 +395,9 @@ def test_update_status_sets_active( @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") -def test_storage_add_does_nothing_if_snap_not_active(harness: Harness, zk_data, passwords_data): +def test_storage_add_does_nothing_if_snap_not_active( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -396,7 +408,7 @@ def test_storage_add_does_nothing_if_snap_not_active(harness: Harness, zk_data, with ( patch("workload.KafkaWorkload.active", return_value=False), - patch("charm.KafkaCharm._disable_enable_restart") as patched_restart, + patch("charm.KafkaCharm._disable_enable_restart_broker") as patched_restart, ): harness.add_storage(storage_name="data", count=2) harness.attach_storage(storage_id="data/1") @@ -405,7 +417,9 @@ def test_storage_add_does_nothing_if_snap_not_active(harness: Harness, zk_data, @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") -def test_storage_add_defers_if_service_not_healthy(harness: Harness, zk_data, passwords_data): +def test_storage_add_defers_if_service_not_healthy( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -416,8 +430,8 @@ def 
test_storage_add_defers_if_service_not_healthy(harness: Harness, zk_data, pa with ( patch("workload.KafkaWorkload.active", return_value=True), - patch("charm.KafkaCharm.healthy", return_value=False), - patch("charm.KafkaCharm._disable_enable_restart") as patched_restart, + patch("events.broker.BrokerOperator.healthy", return_value=False), + patch("charm.KafkaCharm._disable_enable_restart_broker") as patched_restart, patch("ops.framework.EventBase.defer") as patched_defer, ): harness.add_storage(storage_name="data", count=2) @@ -428,7 +442,9 @@ def test_storage_add_defers_if_service_not_healthy(harness: Harness, zk_data, pa @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") -def test_storage_add_disableenables_and_starts(harness: Harness, zk_data, passwords_data): +def test_storage_add_disableenables_and_starts( + harness: Harness[KafkaCharm], zk_data, passwords_data +): with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -439,7 +455,7 @@ def test_storage_add_disableenables_and_starts(harness: Harness, zk_data, passwo with ( patch("workload.KafkaWorkload.active", return_value=True), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("managers.config.ConfigManager.set_server_properties"), patch("managers.config.ConfigManager.set_client_properties"), @@ -457,7 +473,9 @@ def test_storage_add_disableenables_and_starts(harness: Harness, zk_data, passwo assert patched_defer.call_count == 0 -def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness: Harness, zk_data): +def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk( + harness: Harness[KafkaCharm], zk_data +): """Checks inter-broker passwords are created on zookeeper-changed hook using zk auth.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -490,7 +508,7 @@ def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk(harness: Har assert True -def test_zookeeper_joined_sets_chroot(harness: Harness): +def test_zookeeper_joined_sets_chroot(harness: Harness[KafkaCharm]): """Checks chroot is added to ZK relation data on zookeeper-relation-joined hook.""" harness.add_relation(PEER, CHARM_KEY) harness.set_leader(True) @@ -501,7 +519,7 @@ def test_zookeeper_joined_sets_chroot(harness: Harness): assert CHARM_KEY in rel.get("database", rel.get("chroot", "")) -def test_zookeeper_broken_stops_service_and_removes_meta_properties(harness: Harness): +def test_zookeeper_broken_stops_service_and_removes_meta_properties(harness: Harness[KafkaCharm]): """Checks service stops and meta.properties is removed on zookeeper-relation-broken hook.""" harness.add_relation(PEER, CHARM_KEY) zk_rel_id = harness.add_relation(ZK, ZK) @@ -513,11 +531,11 @@ def test_zookeeper_broken_stops_service_and_removes_meta_properties(harness: Har harness.remove_relation(zk_rel_id) patched_stop_snap_service.assert_called_once() - assert re.match(r"rm .*/meta.properties", patched_exec.call_args_list[0].args[0]) + assert re.match(r"rm .*/meta.properties", " ".join(patched_exec.call_args_list[0].args[0])) assert isinstance(harness.charm.unit.status, BlockedStatus) -def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness): +def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness[KafkaCharm]): """Checks internal user credentials are removed on zookeeper-relation-broken hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) @@ -539,7 +557,7 @@ def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness): patched_update.assert_called_once_with({"saruman-password": ""}) -def test_config_changed_updates_server_properties(harness: Harness, zk_data): +def test_config_changed_updates_server_properties(harness: Harness[KafkaCharm], zk_data): """Checks that new charm/unit config writes server config to unit on config changed hook.""" with harness.hooks_disabled(): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) @@ -554,7 +572,7 @@ def test_config_changed_updates_server_properties(harness: Harness, zk_data): new_callable=PropertyMock, return_value=["gandalf=white"], ), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), patch("managers.config.ConfigManager.set_server_properties") as set_server_properties, @@ -565,7 +583,7 @@ def test_config_changed_updates_server_properties(harness: Harness, zk_data): set_server_properties.assert_called_once() -def test_config_changed_updates_client_properties(harness: Harness): +def test_config_changed_updates_client_properties(harness: Harness[KafkaCharm]): """Checks that new charm/unit config writes client config to unit on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -581,7 +599,7 @@ def test_config_changed_updates_client_properties(harness: Harness): new_callable=PropertyMock, return_value=["sauron=bad"], ), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), patch("managers.config.ConfigManager.set_server_properties"), @@ -592,7 +610,7 @@ def test_config_changed_updates_client_properties(harness: Harness): set_client_properties.assert_called_once() -def test_config_changed_updates_client_data(harness: Harness): +def test_config_changed_updates_client_data(harness: Harness[KafkaCharm]): """Checks that provided relation data updates on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -604,11 +622,11 @@ def test_config_changed_updates_client_data(harness: Harness): new_callable=PropertyMock, return_value=["gandalf=white"], ), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), patch("managers.config.ConfigManager.set_zk_jaas_config"), - patch("charm.KafkaCharm.update_client_data") as patched_update_client_data, + patch("events.broker.BrokerOperator.update_client_data") as patched_update_client_data, patch( "managers.config.ConfigManager.set_client_properties" ) as patched_set_client_properties, @@ -620,7 +638,7 @@ def test_config_changed_updates_client_data(harness: Harness): patched_update_client_data.assert_called_once() -def test_config_changed_restarts(harness: Harness): +def test_config_changed_restarts(harness: Harness[KafkaCharm]): """Checks units rolling-restart on config changed hook.""" peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -634,7 +652,7 @@ def test_config_changed_restarts(harness: Harness): new_callable=PropertyMock, return_value=["gandalf=grey"], ), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("workload.KafkaWorkload.restart") as patched_restart_snap_service, @@ -652,7 +670,7 @@ def test_config_changed_restarts(harness: Harness): @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_on_remove_sysctl_is_deleted(harness: Harness): +def test_on_remove_sysctl_is_deleted(harness: Harness[KafkaCharm]): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") @@ -662,11 +680,11 @@ def test_on_remove_sysctl_is_deleted(harness: Harness): patched_sysctl_remove.assert_called_once() -def test_workload_version_is_setted(harness, monkeypatch): +def test_workload_version_is_setted(harness: Harness[KafkaCharm], monkeypatch): output_install = "3.6.0-ubuntu0" output_changed = "3.6.1-ubuntu0" monkeypatch.setattr( - harness.charm.workload, + harness.charm.broker.workload, "run_bin_command", Mock(side_effect=[output_install, output_changed]), ) @@ -681,7 +699,7 @@ def test_workload_version_is_setted(harness, monkeypatch): new_callable=PropertyMock, return_value=["gandalf=grey"], ), - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): diff --git a/tests/unit/test_charm_balancer.py b/tests/unit/test_charm_balancer.py new file mode 100644 index 00000000..6782b660 --- /dev/null +++ b/tests/unit/test_charm_balancer.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details.
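+# +# NOTE: unlike the Harness-based suites elsewhere in tests/unit, this module drives the +# charm through the ops-scenario Context/State API: each test declares leadership, config +# and relations in an input State, emits a single event via ctx.run(), and asserts on the +# returned output State.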
+ +import json +import logging +import re +from pathlib import Path +from unittest.mock import patch + +import pytest +import yaml +from ops import ActiveStatus +from scenario import Context, PeerRelation, Relation, State + +from charm import KafkaCharm +from literals import ( + BALANCER_WEBSERVER_USER, + INTERNAL_USERS, + PEER, + ZK, + Status, +) + +pytestmark = pytest.mark.balancer + +logger = logging.getLogger(__name__) + + +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + + +@pytest.fixture() +def charm_configuration(): + """Enable direct mutation on configuration dict.""" + return json.loads(json.dumps(CONFIG)) + + +def test_install_blocks_snap_install_failure(charm_configuration): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + state_in = State() + + # When + with patch("workload.Workload.install", return_value=False), patch("workload.Workload.write"): + state_out = ctx.run("install", state_in) + + # Then + assert state_out.unit_status == Status.SNAP_NOT_INSTALLED.value.status + + +@patch("workload.Workload.restart") +def test_stop_workload_if_not_leader(patched_restart, charm_configuration): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + state_in = State(leader=False, relations=[]) + + # When + ctx.run("start", state_in) + + # Then + assert not patched_restart.called + + +def test_stop_workload_if_role_not_present(charm_configuration): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + state_in = State(leader=True, relations=[], config={"roles": "broker"}) + + # When + with ( + patch("workload.BalancerWorkload.active", return_value=True), + patch("workload.BalancerWorkload.stop") as patched_stopped, + ): + ctx.run("config_changed", state_in) + + # Then + patched_stopped.assert_called_once() + + +def test_ready_to_start_maintenance_no_peer_relation(charm_configuration): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + state_in = State(leader=True, relations=[]) + + # When + state_out = ctx.run("start", state_in) + + # Then + assert state_out.unit_status == Status.NO_PEER_RELATION.value.status + + +def test_ready_to_start_no_peer_cluster(charm_configuration): + """Balancer-only mode requires a peer-cluster relation.""" + # Given + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + cluster_peer = PeerRelation(PEER, PEER) + state_in = State(leader=True, relations=[cluster_peer]) + + # When + state_out = ctx.run("start", state_in) + + # Then + assert state_out.unit_status == Status.NO_PEER_CLUSTER_RELATION.value.status + + +def test_ready_to_start_no_zk_data(charm_configuration): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer,broker" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + cluster_peer = PeerRelation(PEER, PEER) + relation = Relation(
interface=ZK, + endpoint=ZK, + remote_app_name=ZK, + ) + state_in = State(leader=True, relations=[cluster_peer, relation]) + + # When + state_out = ctx.run("start", state_in) + + # Then + assert state_out.unit_status == Status.ZK_NO_DATA.value.status + + +def test_ready_to_start_no_broker_data(charm_configuration, zk_data): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer,broker" + ctx = Context( + KafkaCharm, + meta=METADATA, + config=charm_configuration, + actions=ACTIONS, + ) + cluster_peer = PeerRelation( + PEER, PEER, local_app_data={f"{user}-password": "pwd" for user in INTERNAL_USERS} + ) + relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK, remote_app_data=zk_data) + state_in = State(leader=True, relations=[cluster_peer, relation]) + + # When + state_out = ctx.run("start", state_in) + + # Then + assert state_out.unit_status == Status.NO_BROKER_DATA.value.status + + +def test_ready_to_start_ok(charm_configuration, zk_data): + # Given + charm_configuration["options"]["roles"]["default"] = "balancer,broker" + ctx = Context(KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS) + cluster_peer = PeerRelation( + PEER, + local_app_data={f"{user}-password": "pwd" for user in INTERNAL_USERS}, + peers_data={ + i: { + "cores": "8", + "storages": json.dumps( + {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{i}": "10240"} + ), + } + for i in range(1, 3) + }, + local_unit_data={ + "cores": "8", + "storages": json.dumps( + {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{0}": "10240"} + ), + }, + ) + + relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK, remote_app_data=zk_data) + state_in = State(leader=True, relations=[cluster_peer, relation], planned_units=3) + + # When + with ( + patch("workload.BalancerWorkload.write") as patched_writer, + patch("workload.BalancerWorkload.read"), + patch("workload.BalancerWorkload.exec"), + patch("workload.BalancerWorkload.restart"), + patch("workload.KafkaWorkload.start"), + patch("workload.BalancerWorkload.active", return_value=True), + patch("core.models.ZooKeeper.broker_active", return_value=True), + ): + state_out = ctx.run("start", state_in) + + # Then + assert state_out.unit_status == ActiveStatus() + # Credentials written to file + assert re.match( + rf"{BALANCER_WEBSERVER_USER}: \w+,ADMIN", + patched_writer.call_args_list[-1].kwargs["content"], + ) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 354f2403..9cbd7d5a 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -18,6 +18,7 @@ DEPENDENCIES, INTER_BROKER_USER, INTERNAL_USERS, + JMX_CC_PORT, JMX_EXPORTER_PORT, JVM_MEM_MAX_GB, JVM_MEM_MIN_GB, @@ -28,6 +29,8 @@ ) from managers.config import ConfigManager +pytestmark = [pytest.mark.broker, pytest.mark.balancer] + BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")) CONFIG = str(yaml.safe_load(Path(BASE_DIR + "/config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path(BASE_DIR + "/actions.yaml").read_text())) @@ -62,7 +65,7 @@ def harness(): return harness -def test_all_storages_in_log_dirs(harness: Harness): +def test_all_storages_in_log_dirs(harness: Harness[KafkaCharm]): """Checks that the log.dirs property updates with all available storages.""" storage_metadata = harness.charm.meta.storages["data"] min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1 @@ -74,7 +77,7 @@ def test_all_storages_in_log_dirs(harness: Harness): ) -def 
test_internal_credentials_only_return_when_all_present(harness: Harness): +def test_internal_credentials_only_return_when_all_present(harness: Harness[KafkaCharm]): peer_rel_id = harness.add_relation(PEER, CHARM_KEY) harness.update_relation_data( peer_rel_id, CHARM_KEY, {f"{INTERNAL_USERS[0]}-password": "mellon"} @@ -89,7 +92,7 @@ def test_internal_credentials_only_return_when_all_present(harness: Harness): assert len(harness.charm.state.cluster.internal_user_credentials) == len(INTERNAL_USERS) -def test_log_dirs_in_server_properties(harness: Harness): +def test_log_dirs_in_server_properties(harness: Harness[KafkaCharm]): """Checks that log.dirs are added to server_properties.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -112,21 +115,19 @@ def test_log_dirs_in_server_properties(harness: Harness): ) found_log_dirs = False - with ( - patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, - ) + with patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ): - for prop in harness.charm.config_manager.server_properties: + for prop in harness.charm.broker.config_manager.server_properties: if "log.dirs" in prop: found_log_dirs = True assert found_log_dirs -def test_listeners_in_server_properties(harness: Harness): +def test_listeners_in_server_properties(harness: Harness[KafkaCharm]): """Checks that listeners are split into INTERNAL and EXTERNAL.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -153,18 +154,18 @@ def test_listeners_in_server_properties(harness: Harness): expected_listeners = f"listeners=INTERNAL_{sasl_pm}://:19092" expected_advertised_listeners = f"advertised.listeners=INTERNAL_{sasl_pm}://{host}:19092" - with ( - patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, - ) + with patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ): - assert expected_listeners in harness.charm.config_manager.server_properties - assert expected_advertised_listeners in harness.charm.config_manager.server_properties + assert expected_listeners in harness.charm.broker.config_manager.server_properties + assert ( + expected_advertised_listeners in harness.charm.broker.config_manager.server_properties + ) -def test_oauth_client_listeners_in_server_properties(harness): +def test_oauth_client_listeners_in_server_properties(harness: Harness[KafkaCharm]): """Checks that oauth client listeners are properly set when relating through oauth.""" harness.add_relation(ZK, CHARM_KEY) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) @@ -208,11 +209,11 @@ def test_oauth_client_listeners_in_server_properties(harness): f"{scram_client_protocol}://{host}:{scram_client_port}," f"{oauth_client_protocol}://{host}:{oauth_client_port}" ) - assert expected_listeners in harness.charm.config_manager.server_properties - assert expected_advertised_listeners in harness.charm.config_manager.server_properties + assert expected_listeners in harness.charm.broker.config_manager.server_properties + assert expected_advertised_listeners in harness.charm.broker.config_manager.server_properties -def
test_ssl_listeners_in_server_properties(harness: Harness): +def test_ssl_listeners_in_server_properties(harness: Harness[KafkaCharm]): """Checks that listeners are added after the TLS relation is created.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) # Simulate data-integrator relation @@ -255,18 +256,18 @@ def test_ssl_listeners_in_server_properties(harness: Harness): f"listeners=INTERNAL_{sasl_pm}://:19093,CLIENT_{sasl_pm}://:9093,CLIENT_{ssl_pm}://:9094" ) expected_advertised_listeners = f"advertised.listeners=INTERNAL_{sasl_pm}://{host}:19093,CLIENT_{sasl_pm}://{host}:9093,CLIENT_{ssl_pm}://{host}:9094" - with ( - patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, - ) + with patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ): - assert expected_listeners in harness.charm.config_manager.server_properties - assert expected_advertised_listeners in harness.charm.config_manager.server_properties + assert expected_listeners in harness.charm.broker.config_manager.server_properties + assert ( + expected_advertised_listeners in harness.charm.broker.config_manager.server_properties + ) -def test_zookeeper_config_succeeds_fails_config(harness: Harness): +def test_zookeeper_config_succeeds_fails_config(harness: Harness[KafkaCharm]): """Checks that no ZK config is returned if a field is missing.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -284,7 +285,7 @@ def test_zookeeper_config_succeeds_fails_config(harness: Harness): assert not harness.charm.state.zookeeper.zookeeper_connected -def test_zookeeper_config_succeeds_valid_config(harness: Harness): +def test_zookeeper_config_succeeds_valid_config(harness: Harness[KafkaCharm]): """Checks that ZK config is returned if all fields are present.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) harness.update_relation_data( @@ -304,9 +305,9 @@ def test_zookeeper_config_succeeds_valid_config(harness: Harness): assert harness.charm.state.zookeeper.zookeeper_connected -def test_kafka_opts(harness: Harness): +def test_kafka_opts(harness: Harness[KafkaCharm]): """Checks necessary args for KAFKA_OPTS.""" - args = harness.charm.config_manager.kafka_opts + args = harness.charm.broker.config_manager.kafka_opts assert "-Djava.security.auth.login.config" in args assert "KAFKA_OPTS" in args @@ -315,7 +316,7 @@ def test_kafka_opts(harness: Harness): "profile,expected", [("production", JVM_MEM_MAX_GB), ("testing", JVM_MEM_MIN_GB)], ) -def test_heap_opts(harness: Harness, profile, expected): +def test_heap_opts(harness: Harness[KafkaCharm], profile, expected): """Checks necessary args for KAFKA_HEAP_OPTS.""" # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that # self.config is not passed again to ConfigManager @@ -330,35 +331,45 @@ def test_heap_opts(harness: Harness, profile, expected): assert "KAFKA_HEAP_OPTS" in args -def test_jmx_opts(harness: Harness): +def test_kafka_jmx_opts(harness: Harness[KafkaCharm]): """Checks necessary args for KAFKA_JMX_OPTS.""" - args = harness.charm.config_manager.jmx_opts + args = harness.charm.broker.config_manager.kafka_jmx_opts assert "-javaagent:" in args assert args.split(":")[1].split("=")[-1] == str(JMX_EXPORTER_PORT) assert "KAFKA_JMX_OPTS" in args -def test_set_environment(harness: Harness, patched_workload_write,
patched_etc_environment): +def test_cc_jmx_opts(harness: Harness[KafkaCharm]): + """Checks necessary args for CC_JMX_OPTS.""" + args = harness.charm.broker.config_manager.cc_jmx_opts + assert "-javaagent:" in args + assert args.split(":")[1].split("=")[-1] == str(JMX_CC_PORT) + assert "CC_JMX_OPTS" in args + + +def test_set_environment( + harness: Harness[KafkaCharm], patched_workload_write, patched_etc_environment +): """Checks all necessary env-vars are written to /etc/environment.""" with ( patch("workload.KafkaWorkload.write") as patched_write, patch("builtins.open", mock_open()), patch("shutil.chown"), ): - harness.charm.config_manager.set_environment() + harness.charm.broker.config_manager.set_environment() - for call in patched_write.call_args_list: - assert "KAFKA_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_JMX_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_HEAP_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_JVM_PERFORMANCE_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_CFG_LOGLEVEL" in call.kwargs.get("content", "") - assert "/etc/environment" == call.kwargs.get("path", "") + call = patched_write.call_args_list[0] + assert "KAFKA_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_JMX_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_HEAP_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_JVM_PERFORMANCE_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_CFG_LOGLEVEL" in call.kwargs.get("content", "") + assert "/etc/environment" == call.kwargs.get("path", "") - assert "KAFKA_LOG4J_OPTS" not in call.kwargs.get("content", "") + assert "KAFKA_LOG4J_OPTS" not in call.kwargs.get("content", "") -def test_bootstrap_server(harness: Harness): +def test_bootstrap_server(harness: Harness[KafkaCharm]): """Checks the bootstrap-server property setting.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") @@ -372,17 +383,20 @@ def test_bootstrap_server(harness: Harness): assert "9092" in server -def test_default_replication_properties_less_than_three(harness: Harness): +def test_default_replication_properties_less_than_three(harness: Harness[KafkaCharm]): """Checks replication property defaults update with units < 3.""" - assert "num.partitions=1" in harness.charm.config_manager.default_replication_properties + assert "num.partitions=1" in harness.charm.broker.config_manager.default_replication_properties assert ( "default.replication.factor=1" - in harness.charm.config_manager.default_replication_properties + in harness.charm.broker.config_manager.default_replication_properties + ) + assert ( + "min.insync.replicas=1" + in harness.charm.broker.config_manager.default_replication_properties ) - assert "min.insync.replicas=1" in harness.charm.config_manager.default_replication_properties -def test_default_replication_properties_more_than_three(harness: Harness): +def test_default_replication_properties_more_than_three(harness: Harness[KafkaCharm]): """Checks replication property defaults update with units > 3.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") @@ -391,15 +405,18 @@ def test_default_replication_properties_more_than_three(harness: Harness): harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/4") harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/5") - assert "num.partitions=3" in harness.charm.config_manager.default_replication_properties + assert "num.partitions=3" in
harness.charm.broker.config_manager.default_replication_properties assert ( "default.replication.factor=3" - in harness.charm.config_manager.default_replication_properties + in harness.charm.broker.config_manager.default_replication_properties + ) + assert ( + "min.insync.replicas=2" + in harness.charm.broker.config_manager.default_replication_properties ) - assert "min.insync.replicas=2" in harness.charm.config_manager.default_replication_properties -def test_ssl_principal_mapping_rules(harness: Harness): +def test_ssl_principal_mapping_rules(harness: Harness[KafkaCharm]): """Check that a change in ssl_principal_mapping_rules is reflected in server_properties.""" harness.add_relation(PEER, CHARM_KEY) zk_relation_id = harness.add_relation(ZK, CHARM_KEY) @@ -417,12 +434,10 @@ def test_ssl_principal_mapping_rules(harness: Harness): }, ) - with ( - patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, - ) + with patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, ): # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that # self.config is not passed again to ConfigManager @@ -437,7 +452,7 @@ def test_ssl_principal_mapping_rules(harness: Harness): ) -def test_auth_properties(harness: Harness): +def test_auth_properties(harness: Harness[KafkaCharm]): """Checks necessary auth properties are present.""" zk_relation_id = harness.add_relation(ZK, CHARM_KEY) peer_relation_id = harness.add_relation(PEER, CHARM_KEY) @@ -458,14 +473,14 @@ def test_auth_properties(harness: Harness): }, ) - assert "broker.id=0" in harness.charm.config_manager.auth_properties + assert "broker.id=0" in harness.charm.broker.config_manager.auth_properties assert ( f"zookeeper.connect={harness.charm.state.zookeeper.connect}" - in harness.charm.config_manager.auth_properties + in harness.charm.broker.config_manager.auth_properties ) -def test_rack_properties(harness: Harness): +def test_rack_properties(harness: Harness[KafkaCharm]): """Checks that rack properties are added to server properties.""" harness.add_relation(PEER, CHARM_KEY) zk_relation_id = harness.add_relation(ZK, CHARM_KEY) @@ -483,17 +498,15 @@ def test_rack_properties(harness: Harness): }, ) - with ( - patch( - "managers.config.ConfigManager.rack_properties", - new_callable=PropertyMock, - return_value=["broker.rack=gondor-west"], - ) + with patch( + "managers.config.ConfigManager.rack_properties", + new_callable=PropertyMock, + return_value=["broker.rack=gondor-west"], ): - assert "broker.rack=gondor-west" in harness.charm.config_manager.server_properties + assert "broker.rack=gondor-west" in harness.charm.broker.config_manager.server_properties -def test_inter_broker_protocol_version(harness: Harness): +def test_inter_broker_protocol_version(harness: Harness[KafkaCharm]): """Checks that inter.broker.protocol.version is set from the pinned Kafka dependency version.""" harness.add_relation(PEER, CHARM_KEY) zk_relation_id = harness.add_relation(ZK, CHARM_KEY) @@ -512,10 +525,13 @@ def test_inter_broker_protocol_version(harness: Harness): ) assert len(DEPENDENCIES["kafka_service"]["version"].split(".")) == 3 - assert "inter.broker.protocol.version=3.6" in harness.charm.config_manager.server_properties + assert ( + "inter.broker.protocol.version=3.6" + in harness.charm.broker.config_manager.server_properties + ) -def test_super_users(harness: Harness):
+def test_super_users(harness: Harness[KafkaCharm]): """Checks super-users property is updated for new admin clients.""" peer_relation_id = harness.add_relation(PEER, CHARM_KEY) app_relation_id = harness.add_relation("kafka-client", "app") diff --git a/tests/unit/test_health.py b/tests/unit/test_health.py index 60812986..369027b8 100644 --- a/tests/unit/test_health.py +++ b/tests/unit/test_health.py @@ -15,12 +15,15 @@ logger = logging.getLogger(__name__) +pytestmark = [ + pytest.mark.broker, + pytest.mark.skipif(SUBSTRATE == "k8s", reason="health checks not used on K8s"), +] + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) -pytestmark = pytest.mark.skipif(SUBSTRATE == "k8s", reason="health checks not used on K8s") - @pytest.fixture def harness(): @@ -41,7 +44,7 @@ def harness(): return harness -def test_service_pid(harness): +def test_service_pid(harness: Harness[KafkaCharm]): with ( patch( "builtins.open", @@ -50,44 +53,46 @@ def test_service_pid(harness): ), patch("subprocess.check_output", return_value="1314231"), ): - assert harness.charm.health._service_pid == 1314231 + assert harness.charm.broker.health._service_pid == 1314231 -def test_check_vm_swappiness(harness): +def test_check_vm_swappiness(harness: Harness[KafkaCharm]): with ( patch("health.KafkaHealth._get_vm_swappiness", return_value=5), patch("health.KafkaHealth._check_file_descriptors", return_value=True), patch("health.KafkaHealth._check_memory_maps", return_value=True), patch("health.KafkaHealth._check_total_memory", return_value=True), ): - assert not harness.charm.health._check_vm_swappiness() - assert not harness.charm.health.machine_configured() + assert not harness.charm.broker.health._check_vm_swappiness() + assert not harness.charm.broker.health.machine_configured() @pytest.mark.parametrize("total_mem_kb", [5741156, 65741156]) @pytest.mark.parametrize( "profile,limit", [("testing", JVM_MEM_MIN_GB), ("production", JVM_MEM_MAX_GB)] ) -def test_check_total_memory_testing_profile(harness, total_mem_kb, profile, limit): +def test_check_total_memory_testing_profile( + harness: Harness[KafkaCharm], total_mem_kb, profile, limit +): harness._update_config({"profile": profile}) with patch("workload.KafkaWorkload.read", return_value=[f"MemTotal: {total_mem_kb} kB"]): if total_mem_kb / 1000000 <= limit: - assert not harness.charm.health._check_total_memory() + assert not harness.charm.broker.health._check_total_memory() else: - assert harness.charm.health._check_total_memory() + assert harness.charm.broker.health._check_total_memory() -def test_get_partitions_size(harness): +def test_get_partitions_size(harness: Harness[KafkaCharm]): example_log_dirs = 'Querying brokers for log directories information\nReceived log directory information from brokers 0\n{"version":1,"brokers":[{"broker":0,"logDirs":[{"logDir":"/var/snap/charmed-kafka/common/var/lib/kafka/data/0","error":null,"partitions":[{"partition":"NEW-TOPIC-2-4","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-3","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-2","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-1","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-0","size":393,"offsetLag":0,"isFuture":false}]}]}]}\n' with patch("workload.KafkaWorkload.run_bin_command", return_value=example_log_dirs): - assert 
harness.charm.health._get_partitions_size() == (5, 393) + assert harness.charm.broker.health._get_partitions_size() == (5, 393) -def test_check_file_descriptors_no_listeners(harness): +def test_check_file_descriptors_no_listeners(harness: Harness[KafkaCharm]): with patch("workload.KafkaWorkload.run_bin_command") as patched_run_bin: - assert harness.charm.health._check_file_descriptors() + assert harness.charm.broker.health._check_file_descriptors() assert patched_run_bin.call_count == 0 @@ -95,7 +100,7 @@ def test_check_file_descriptors_no_listeners(harness): @pytest.mark.parametrize("fd", [True, False]) @pytest.mark.parametrize("swap", [True, False]) @pytest.mark.parametrize("mem", [True, False]) -def test_machine_configured_succeeds_and_fails(harness, mmap, fd, swap, mem): +def test_machine_configured_succeeds_and_fails(harness: Harness[KafkaCharm], mmap, fd, swap, mem): with ( patch("health.KafkaHealth._check_memory_maps", return_value=mmap), patch("health.KafkaHealth._check_file_descriptors", return_value=fd), @@ -103,6 +108,6 @@ def test_machine_configured_succeeds_and_fails(harness, mmap, fd, swap, mem): patch("health.KafkaHealth._check_total_memory", return_value=mem), ): if all([mmap, fd, swap, mem]): - assert harness.charm.health.machine_configured() + assert harness.charm.broker.health.machine_configured() else: - assert not harness.charm.health.machine_configured() + assert not harness.charm.broker.health.machine_configured() diff --git a/tests/unit/test_provider.py b/tests/unit/test_provider.py index 94a561df..94b53a5b 100644 --- a/tests/unit/test_provider.py +++ b/tests/unit/test_provider.py @@ -15,6 +15,8 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) @@ -39,13 +41,15 @@ def harness(): return harness -def test_client_relation_created_defers_if_not_ready(harness: Harness): +def test_client_relation_created_defers_if_not_ready(harness: Harness[KafkaCharm]): """Checks event is deferred if not ready on client-relation-created hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) with ( - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=False), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=False + ), patch("managers.auth.AuthManager.add_user") as patched_add_user, patch("ops.framework.EventBase.defer") as patched_defer, ): @@ -62,7 +66,7 @@ def test_client_relation_created_defers_if_not_ready(harness: Harness): patched_defer.assert_called() -def test_client_relation_created_adds_user(harness: Harness): +def test_client_relation_created_adds_user(harness: Harness[KafkaCharm]): """Checks if new users are added on client-relation-created hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) @@ -70,7 +74,9 @@ def test_client_relation_created_adds_user(harness: Harness): client_rel_id = harness.add_relation(REL_NAME, "app") with ( - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), patch("managers.auth.AuthManager.add_user") as patched_add_user, patch("workload.KafkaWorkload.run_bin_command"), patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), @@ -85,13 +91,15 @@ def
test_client_relation_created_adds_user(harness: Harness): assert harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") -def test_client_relation_broken_removes_user(harness: Harness): +def test_client_relation_broken_removes_user(harness: Harness[KafkaCharm]): """Checks if users are removed on client-relation-broken hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) with ( - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), patch("managers.auth.AuthManager.add_user"), patch("managers.auth.AuthManager.delete_user") as patched_delete_user, patch("managers.auth.AuthManager.remove_all_user_acls") as patched_remove_acls, @@ -117,13 +125,15 @@ def test_client_relation_broken_removes_user(harness: Harness): patched_delete_user.assert_called_once() -def test_client_relation_joined_sets_necessary_relation_data(harness: Harness): +def test_client_relation_joined_sets_necessary_relation_data(harness: Harness[KafkaCharm]): """Checks if all needed provider relation data is set on client-relation-joined hook.""" with harness.hooks_disabled(): harness.add_relation(PEER, CHARM_KEY) with ( - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), patch("managers.auth.AuthManager.add_user"), patch("workload.KafkaWorkload.run_bin_command"), patch("core.models.ZooKeeper.uris", new_callable=PropertyMock, return_value="yes"), diff --git a/tests/unit/test_structured_config.py b/tests/unit/test_structured_config.py index 799674b9..e6b68e54 100644 --- a/tests/unit/test_structured_config.py +++ b/tests/unit/test_structured_config.py @@ -4,6 +4,7 @@ import logging from pathlib import Path +from typing import Iterable import pytest import yaml @@ -12,6 +13,8 @@ from charm import KafkaCharm from literals import CHARM_KEY, CONTAINER, SUBSTRATE +pytestmark = [pytest.mark.broker, pytest.mark.balancer] + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) @@ -48,19 +51,22 @@ def test_config_parsing_parameters_integer_values(harness) -> None: check_valid_values(harness, field, valid_values) -def check_valid_values(_harness, field: str, accepted_values: list, is_long_field=False) -> None: +def check_valid_values( + _harness, field: str, accepted_values: Iterable, is_long_field=False +) -> None: """Check the correctness of the passed values for a field.""" for value in accepted_values: _harness.update_config({field: value}) assert _harness.charm.config[field] == value if not is_long_field else int(value) -def check_invalid_values(_harness, field: str, erroneus_values: list) -> None: +def check_invalid_values(_harness, field: str, erroneus_values: Iterable) -> None: """Check the incorrectness of the passed values for a field.""" - for value in erroneus_values: - _harness.update_config({field: value}) - with pytest.raises(ValueError): - _ = _harness.charm.config[field] + with _harness.hooks_disabled(): + for value in erroneus_values: + _harness.update_config({field: value}) + with pytest.raises(ValueError): + _ = _harness.charm.config[field] def test_product_related_values(harness) -> None: @@ -151,3 +157,12 @@ def test_config_parsing_parameters_long_values(harness) -> None: for field in
long_fields: check_invalid_values(harness, field, erroneus_values) check_valid_values(harness, field, valid_values, is_long_field=True) + + +def test_incorrect_roles(harness): + erroneus_values = ["", "something_else", "broker, something_else", "broker,balancer,"] + valid_values = ["broker", "balancer", "balancer,broker", "broker, balancer "] + check_invalid_values(harness, "roles", erroneus_values) + for value in valid_values: + harness.update_config({"roles": value}) + assert harness.charm.config.roles diff --git a/tests/unit/test_tls.py b/tests/unit/test_tls.py index 8ce03433..ef7ea709 100644 --- a/tests/unit/test_tls.py +++ b/tests/unit/test_tls.py @@ -14,6 +14,8 @@ from charm import KafkaCharm from literals import CHARM_KEY, CONTAINER, PEER, SUBSTRATE, ZK +pytestmark = pytest.mark.broker + CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) @@ -62,7 +64,7 @@ def harness(): return harness -def test_blocked_if_trusted_certificate_added_before_tls_relation(harness: Harness): +def test_blocked_if_trusted_certificate_added_before_tls_relation(harness: Harness[KafkaCharm]): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") @@ -76,7 +78,7 @@ def test_blocked_if_trusted_certificate_added_before_tls_relation(harness: Harne assert isinstance(harness.charm.app.status, BlockedStatus) -def test_mtls_flag_added(harness: Harness): +def test_mtls_flag_added(harness: Harness[KafkaCharm]): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") @@ -93,7 +95,7 @@ def test_mtls_flag_added(harness: Harness): assert isinstance(harness.charm.app.status, ActiveStatus) -def test_extra_sans_config(harness: Harness): +def test_extra_sans_config(harness: Harness[KafkaCharm]): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") @@ -111,7 +113,7 @@ def test_extra_sans_config(harness: Harness): assert harness.charm.tls._extra_sans == ["worker0.com", "0.example"] -def test_sans(harness: Harness): +def test_sans(harness: Harness[KafkaCharm]): # Create peer relation peer_relation_id = harness.add_relation(PEER, CHARM_KEY) harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 31c9dca5..1a9f1fd1 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -18,6 +18,7 @@ logger = logging.getLogger(__name__) +pytestmark = pytest.mark.broker CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) @@ -60,21 +61,21 @@ def harness(zk_data): return harness -def test_pre_upgrade_check_raises_not_stable(harness: Harness): +def test_pre_upgrade_check_raises_not_stable(harness: Harness[KafkaCharm]): with pytest.raises(ClusterNotReadyError): - harness.charm.upgrade.pre_upgrade_check() + harness.charm.broker.upgrade.pre_upgrade_check() -def test_pre_upgrade_check_succeeds(harness: Harness): +def test_pre_upgrade_check_succeeds(harness: Harness[KafkaCharm]): with ( - patch("charm.KafkaCharm.healthy", return_value=True), + patch("events.broker.BrokerOperator.healthy", return_value=True), patch("events.upgrade.KafkaUpgrade._set_rolling_update_partition"), ): -
harness.charm.upgrade.pre_upgrade_check() + harness.charm.broker.upgrade.pre_upgrade_check() @pytest.mark.skipif(SUBSTRATE == "k8s", reason="upgrade stack not used on K8s") -def test_build_upgrade_stack(harness: Harness): +def test_build_upgrade_stack(harness: Harness[KafkaCharm]): with harness.hooks_disabled(): harness.add_relation_unit(harness.charm.state.peer_relation.id, f"{CHARM_KEY}/1") harness.update_relation_data( @@ -89,25 +90,27 @@ def test_build_upgrade_stack(harness: Harness): {"private-address": "222.222.222"}, ) - stack = harness.charm.upgrade.build_upgrade_stack() + stack = harness.charm.broker.upgrade.build_upgrade_stack() assert len(stack) == 3 assert len(stack) == len(set(stack)) @pytest.mark.parametrize("upgrade_stack", ([], [0])) -def test_run_password_rotation_while_upgrading(harness, upgrade_stack): - harness.charm.upgrade.upgrade_stack = upgrade_stack +def test_run_password_rotation_while_upgrading(harness: Harness[KafkaCharm], upgrade_stack): + harness.charm.broker.upgrade.upgrade_stack = upgrade_stack harness.set_leader(True) mock_event = MagicMock() mock_event.params = {"username": "admin"} with ( - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), patch("managers.auth.AuthManager.add_user"), ): - harness.charm.password_action_events._set_password_action(mock_event) + harness.charm.broker.password_action_events._set_password_action(mock_event) if not upgrade_stack: mock_event.set_results.assert_called() @@ -125,7 +128,7 @@ def test_kafka_dependency_model(): def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails( - harness: Harness, upgrade_func: str + harness: Harness[KafkaCharm], upgrade_func: str ): with harness.hooks_disabled(): harness.set_leader(True) @@ -155,13 +158,13 @@ def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails( ), ): mock_event = MagicMock() - getattr(harness.charm.upgrade, upgrade_func)(mock_event) + getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event) - assert harness.charm.upgrade.state == "failed" + assert harness.charm.broker.upgrade.state == "failed" @pytest.mark.skipif(SUBSTRATE == "k8s", reason="Upgrade granted not used on K8s charms") -def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness): +def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness[KafkaCharm]): with ( patch( "events.upgrade.KafkaUpgrade.zookeeper_current_version", @@ -169,16 +172,19 @@ def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness): return_value="3.6", ), patch("workload.KafkaWorkload.stop") as patched_stop, + patch("workload.BalancerWorkload.stop"), patch("workload.KafkaWorkload.install", return_value=False), ): mock_event = MagicMock() - harness.charm.upgrade._on_upgrade_granted(mock_event) + harness.charm.broker.upgrade._on_upgrade_granted(mock_event) patched_stop.assert_called_once() - assert harness.charm.upgrade.state == "failed" + assert harness.charm.broker.upgrade.state == "failed" -def test_upgrade_sets_failed_if_failed_upgrade_check(harness: Harness, upgrade_func: str): +def test_upgrade_sets_failed_if_failed_upgrade_check( + harness: Harness[KafkaCharm], upgrade_func: str +): with ( patch( "core.models.ZooKeeper.zookeeper_version", @@ -194,7 +200,10 @@ def test_upgrade_sets_failed_if_failed_upgrade_check(harness: Harness, upgrade_f patch("workload.KafkaWorkload.start") as patched_start, patch("workload.KafkaWorkload.stop"), 
patch("workload.KafkaWorkload.install"), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=False), + patch("workload.BalancerWorkload.stop"), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=False + ), patch( "core.cluster.ClusterState.ready_to_start", new_callable=PropertyMock, @@ -207,13 +216,13 @@ def test_upgrade_sets_failed_if_failed_upgrade_check(harness: Harness, upgrade_f ), ): mock_event = MagicMock() - getattr(harness.charm.upgrade, upgrade_func)(mock_event) + getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event) assert patched_restart.call_count or patched_start.call_count - assert harness.charm.upgrade.state == "failed" + assert harness.charm.broker.upgrade.state == "failed" -def test_upgrade_succeeds(harness: Harness, upgrade_func: str): +def test_upgrade_succeeds(harness: Harness[KafkaCharm], upgrade_func: str): with ( patch( "core.models.ZooKeeper.zookeeper_version", @@ -230,7 +239,10 @@ def test_upgrade_succeeds(harness: Harness, upgrade_func: str): patch("workload.KafkaWorkload.stop"), patch("workload.KafkaWorkload.install"), patch("workload.KafkaWorkload.active", new_callable=PropertyMock, return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch("workload.BalancerWorkload.stop"), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), patch( "core.cluster.ClusterState.ready_to_start", new_callable=PropertyMock, @@ -247,14 +259,14 @@ def test_upgrade_succeeds(harness: Harness, upgrade_func: str): ), ): mock_event = MagicMock() - getattr(harness.charm.upgrade, upgrade_func)(mock_event) + getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event) assert patched_restart.call_count or patched_start.call_count - assert harness.charm.upgrade.state == "completed" + assert harness.charm.broker.upgrade.state == "completed" @pytest.mark.skipif(SUBSTRATE == "k8s", reason="Upgrade granted not used on K8s charms") -def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness): +def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness[KafkaCharm]): with harness.hooks_disabled(): harness.set_leader(True) @@ -268,10 +280,13 @@ def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness): patch("workload.KafkaWorkload.stop"), patch("workload.KafkaWorkload.restart"), patch("workload.KafkaWorkload.install", return_value=True), - patch("charm.KafkaCharm.healthy", new_callable=PropertyMock, return_value=True), + patch( + "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True + ), + patch("workload.BalancerWorkload.stop"), patch("events.upgrade.KafkaUpgrade.on_upgrade_changed") as patched_upgrade, ): mock_event = MagicMock() - harness.charm.upgrade._on_upgrade_granted(mock_event) + harness.charm.broker.upgrade._on_upgrade_granted(mock_event) patched_upgrade.assert_called_once() diff --git a/tests/unit/test_workload.py b/tests/unit/test_workload.py index 9f30153d..2ddcf000 100644 --- a/tests/unit/test_workload.py +++ b/tests/unit/test_workload.py @@ -12,7 +12,10 @@ if SUBSTRATE == "vm": from charms.operator_libs_linux.v1.snap import SnapError -pytestmark = pytest.mark.skipif(SUBSTRATE == "k8s", reason="workload tests not needed for K8s") +pytestmark = [ + pytest.mark.broker, + pytest.mark.skipif(SUBSTRATE == "k8s", reason="workload tests not needed for K8s"), +] def test_run_bin_command_args(patched_exec): diff --git a/tox.ini 
b/tox.ini index f4362a60..45e8e389 100644 --- a/tox.ini +++ b/tox.ini @@ -28,6 +28,7 @@ set_env = upgrade: TEST_FILE=test_upgrade.py tls: TEST_FILE=test_tls.py ha: TEST_FILE=ha/test_ha.py + balancer: TEST_FILE=test_balancer.py pass_env = PYTHONPATH @@ -86,7 +87,7 @@ commands = poetry install --no-root --with integration poetry run pytest -vv --tb native --log-cli-level=INFO -s {posargs} {[vars]tests_path}/integration/ -[testenv:integration-{charm,provider,scaling,password-rotation,tls,upgrade,ha}] +[testenv:integration-{charm,provider,scaling,password-rotation,tls,upgrade,ha,balancer}] description = Run integration tests set_env = {[testenv]set_env}
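Reviewer note: every test in the new test_charm_balancer.py above follows the same Given/When/Then shape on top of ops-scenario. A minimal, self-contained sketch of that pattern (it mirrors test_install_blocks_snap_install_failure; the patched workload.Workload paths, the Status literals, and the YAML files are the charm's own, and the snippet assumes it is run from the charm root like the real module):

```python
# Distilled sketch of the scenario-based test pattern introduced in this PR.
from pathlib import Path
from unittest.mock import patch

import yaml
from scenario import Context, State

from charm import KafkaCharm
from literals import Status

# Same metadata-loading convention as test_charm_balancer.py
CONFIG = yaml.safe_load(Path("./config.yaml").read_text())
ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text())
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())


def test_install_blocks_when_snap_install_fails():
    # Given: a Context wrapping the charm, and an input State selecting the balancer role
    ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS)
    state_in = State(leader=True, config={"roles": "balancer"})

    # When: the install hook runs while the snap install is mocked to fail
    with patch("workload.Workload.install", return_value=False), patch("workload.Workload.write"):
        state_out = ctx.run("install", state_in)

    # Then: the returned output State carries the expected blocked status
    assert state_out.unit_status == Status.SNAP_NOT_INSTALLED.value.status
```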