Chore: Fix pylint issues with new version of pylint (#150)
* Chore: Fix pylint issues with new version of pylint

* Completely remove the Base Class|Protocol `TokenManager`
addyess authored Sep 20, 2024
1 parent 8d1f9e2 commit d04f5f4
Showing 14 changed files with 141 additions and 131 deletions.
10 changes: 5 additions & 5 deletions charms/worker/k8s/lib/charms/k8s/v0/k8sd_api_manager.py
```diff
@@ -54,12 +54,12 @@ class ErrorCodes(enum.Enum):
     """Enumerate the response codes from the k8s api.

     Attributes:
-        StatusNodeUnavailable: returned when the node isn't in the cluster
-        StatusNodeInUse: returned when the node is in the cluster already
+        STATUS_NODE_UNAVAILABLE: returned when the node isn't in the cluster
+        STATUS_NODE_IN_USE: returned when the node is in the cluster already
     """

-    StatusNodeUnavailable = 520
-    StatusNodeInUse = 521
+    STATUS_NODE_UNAVAILABLE = 520
+    STATUS_NODE_IN_USE = 521


 class K8sdAPIManagerError(Exception):
```
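Note: the rename is driven by pylint's `invalid-name` (C0103) check, which treats enum members as class constants and expects UPPER_CASE names. A minimal sketch of the convention (illustrative only, not the charm's code):

```python
import enum

class ErrorCodes(enum.Enum):
    # UPPER_CASE member names satisfy pylint's C0103 class-constant style;
    # the values are unchanged, so lookups by value still resolve.
    STATUS_NODE_UNAVAILABLE = 520
    STATUS_NODE_IN_USE = 521

print(ErrorCodes(520))  # ErrorCodes.STATUS_NODE_UNAVAILABLE
```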
```diff
@@ -320,7 +320,7 @@ class UserFacingClusterConfig(BaseModel):
     cloud_provider: Optional[str] = Field(None, alias="cloud-provider")


-class UserFacingDatastoreConfig(BaseModel, allow_population_by_field_name=True):  # type: ignore[call-arg]
+class UserFacingDatastoreConfig(BaseModel, allow_population_by_field_name=True):
     """Aggregated configuration model for the user-facing datastore aspects of a cluster.

     Attributes:
```
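Note: removing the `# type: ignore[call-arg]` leaves pydantic v1's class-keyword config in place (pydantic is pinned to 1.10.18 below). A small sketch of what the keyword does, with a hypothetical model:

```python
from typing import Optional
from pydantic import BaseModel, Field  # pydantic 1.x, as pinned in requirements.txt

class DatastoreConfig(BaseModel, allow_population_by_field_name=True):
    cloud_provider: Optional[str] = Field(None, alias="cloud-provider")

# The flag lets either the alias or the field name populate the model:
print(DatastoreConfig(**{"cloud-provider": "gce"}).cloud_provider)  # gce
print(DatastoreConfig(cloud_provider="gce").cloud_provider)         # gce
```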
2 changes: 1 addition & 1 deletion charms/worker/k8s/requirements.txt
```diff
@@ -3,7 +3,7 @@ charm-lib-interface-external-cloud-provider @ git+https://github.com/charmed-kub
 charm-lib-node-base @ git+https://github.com/charmed-kubernetes/layer-kubernetes-node-base@9b212854e768f13c26cc907bed51444e97e51b50#subdirectory=ops
 charm-lib-reconciler @ git+https://github.com/charmed-kubernetes/charm-lib-reconciler@f818cc30d1a22be43ffdfecf7fbd9c3fd2967502
 cosl==0.0.26
-ops==2.16.0
+ops==2.16.1
 pydantic==1.10.18
 PyYAML==6.0.2
 tomli == 2.0.1
```
23 changes: 10 additions & 13 deletions charms/worker/k8s/scripts/update_alert_rules.py
```diff
@@ -52,11 +52,11 @@ def download_and_process_rule_files(temp_dir: Path):
         source_url = f"{SOURCE}/{file}"
         temp_file = temp_dir / file
         try:
-            logging.info(f"Downloading {source_url}")
+            logging.info("Downloading %s", source_url)
             with urlopen(source_url) as response:  # nosec
                 process_rule_file(response, temp_file, source_url)
         except URLError as e:
-            logging.error(f"Error fetching dashboard data: {e}")
+            logging.error("Error fetching dashboard data: %s", e)


 def process_rule_file(contents, destination_file: Path, source_url: str):
```
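Note: the f-string → `%s` changes here (and throughout this commit) address pylint's `logging-fstring-interpolation` (W1203). A minimal sketch of the difference, with a hypothetical URL:

```python
import logging

logging.basicConfig(level=logging.INFO)
url = "https://example.com/rules.yaml"  # hypothetical value

logging.info(f"Downloading {url}")   # eager: the string is built even if INFO is disabled
logging.info("Downloading %s", url)  # lazy: formatted only when the record is emitted
```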
```diff
@@ -88,7 +88,7 @@ def process_rule_file(contents, destination_file: Path, source_url: str):

     with destination_file.open("w") as file:
         file.write("\n".join(data))
-    logging.info(f"Processed and saved to {destination_file}")
+    logging.info("Processed and saved to %s", destination_file)


 def move_processed_files(temp_dir):
```
```diff
@@ -100,28 +100,25 @@
     for temp_file in temp_dir.iterdir():
         final_path = ALERT_RULES_DIR / temp_file.name
         shutil.move(str(temp_file), str(final_path))
-        logging.info(f"Moved {temp_file.name} to {final_path}")
+        logging.info("Moved %s to %s", temp_file.name, final_path)


 def apply_patches():
     """Apply patches to the downloaded and processed rule files."""
     for patch_file in PATCHES_DIR.glob("*"):
-        logging.info(f"Applying patch {patch_file}")
+        logging.info("Applying patch %s", patch_file)
         subprocess.check_call(["/usr/bin/git", "apply", str(patch_file)])


 def main():
     """Fetch, process, and save AlertManager rules."""
     with TemporaryDirectory() as temp_dir:
         temp_path = Path(temp_dir)
-        try:
-            download_and_process_rule_files(temp_path)
-            shutil.rmtree(ALERT_RULES_DIR, ignore_errors=True)
-            ALERT_RULES_DIR.mkdir(parents=True)
-            move_processed_files(temp_path)
-            apply_patches()
-        except Exception as e:
-            logging.error("An error occurred: %s" % e)
+        download_and_process_rule_files(temp_path)
+        shutil.rmtree(ALERT_RULES_DIR, ignore_errors=True)
+        ALERT_RULES_DIR.mkdir(parents=True)
+        move_processed_files(temp_path)
+        apply_patches()


 if __name__ == "__main__":
```
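Note: dropping the `try/except Exception` clears pylint's `broad-exception-caught` (W0718) and lets failures surface with a full traceback. If a handler were still wanted, a narrower form might look like this (hypothetical sketch, not what the commit does):

```python
import logging
from urllib.error import URLError

try:
    raise URLError("simulated download failure")  # stand-in for the pipeline steps
except OSError as e:  # URLError subclasses OSError; narrow enough for W0718
    logging.error("An error occurred: %s", e)
```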
8 changes: 4 additions & 4 deletions charms/worker/k8s/scripts/update_dashboards.py
```diff
@@ -60,7 +60,7 @@ def fetch_dashboards(source_url: str):
         with urlopen(source_url) as response:  # nosec
             return yaml.safe_load(response.read())
     except URLError as e:
-        logging.error(f"Error fetching dashboard data: {e}")
+        logging.error("Error fetching dashboard data: %s", e)
         return None

```
```diff
@@ -101,17 +101,17 @@ def prepare_dashboard(json_value):
     return json.dumps(json_value, indent=4).replace("$datasource", "$prometheusds")


-def save_dashboard_to_file(name, data):
+def save_dashboard_to_file(name, data: str):
     """Save the prepared dashboard JSON to a file.

     Args:
         name (str): name of the dashboard file
         data (str): file content to write
     """
     filepath = os.path.join(TARGET_DIR, name)
-    with open(filepath, "w") as f:
+    with open(filepath, "w", encoding="utf-8") as f:
         f.write(data)
-    logging.info(f"Dashboard '{name}' saved to {filepath}")
+    logging.info("Dashboard '%s' saved to %s", name, filepath)


 def main():
```
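Note: the `encoding="utf-8"` argument quiets pylint's `unspecified-encoding` (W1514), which flags text-mode `open()` calls that fall back to the platform default. Minimal sketch with a hypothetical file:

```python
from pathlib import Path

path = Path("dashboard.json")  # hypothetical file name

# Without encoding=..., the platform default applies and pylint warns (W1514).
with open(path, "w", encoding="utf-8") as f:
    f.write('{"title": "example"}')

print(path.read_text(encoding="utf-8"))
```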
21 changes: 11 additions & 10 deletions charms/worker/k8s/src/charm.py
```diff
@@ -95,6 +95,7 @@ def _cluster_departing_unit(event: ops.EventBase) -> Union[Literal[False], ops.U
         isinstance(event, ops.RelationDepartedEvent)
         and event.relation.name in ["k8s-cluster", "cluster"]
         and event.departing_unit
+        or False
     )
```


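Note: without the trailing `or False`, the chained `and` expression can evaluate to a falsy intermediate such as `None`, which doesn't match the function's declared return type (a departing unit or the literal `False`). A standalone sketch of the pattern, with simplified types:

```python
from typing import Literal, Optional, Union

def departing(departed: bool, unit: Optional[str]) -> Union[Literal[False], str]:
    # `and` yields its first falsy operand (possibly None);
    # `or False` normalizes every falsy outcome to the literal False.
    return (departed and unit) or False

print(departing(True, None))     # False, not None
print(departing(True, "k8s/0"))  # k8s/0
```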
```diff
@@ -131,7 +132,7 @@ def __init__(self, *args):
         self.labeller = LabelMaker(
             self, kubeconfig_path=self._internal_kubeconfig, kubectl=KUBECTL_PATH
         )
-        self._stored.set_default(is_dying=False, cluster_name="")
+        self._stored.set_default(is_dying=False, cluster_name=str())

         self.cos_agent = COSAgentProvider(
             self,
```
```diff
@@ -206,8 +207,7 @@ def get_cluster_name(self) -> str:
         Returns:
             the cluster name.
         """
-        unit, node = self.unit.name, self.get_node_name()
-        if not self._stored.cluster_name:
+        if self._stored.cluster_name == "":
             if self.lead_control_plane and self.api_manager.is_cluster_bootstrapped():
                 # TODO: replace with API call once available from the snap
                 self._stored.cluster_name = str(uuid.uuid4())
```
```diff
@@ -221,8 +221,9 @@
             ):
                 self._stored.cluster_name = self.collector.cluster_name(relation, True)

+        unit, node = self.unit.name, self.get_node_name()
         log.info("%s(%s) current cluster-name=%s", unit, node, self._stored.cluster_name)
-        return self._stored.cluster_name
+        return str(self._stored.cluster_name)

     def get_node_name(self) -> str:
         """Return the lowercase hostname.
```
```diff
@@ -282,8 +283,8 @@ def _bootstrap_k8s_snap(self):
         bootstrap_config = BootstrapConfig()
         self._configure_datastore(bootstrap_config)
         self._configure_cloud_provider(bootstrap_config)
-        bootstrap_config.service_cidr = self.config["service-cidr"]
-        bootstrap_config.control_plane_taints = self.config["register-with-taints"].split()
+        bootstrap_config.service_cidr = str(self.config["service-cidr"])
+        bootstrap_config.control_plane_taints = str(self.config["register-with-taints"]).split()
         bootstrap_config.extra_sans = [_get_public_address()]

         status.add(ops.MaintenanceStatus("Bootstrapping Cluster"))
```
```diff
@@ -309,7 +310,7 @@ def _config_containerd_registries(self):
         registries, config = [], ""
         containerd_relation = self.model.get_relation("containerd")
         if self.is_control_plane:
-            config = self.config["containerd_custom_registries"]
+            config = str(self.config["containerd_custom_registries"])
             registries = containerd.parse_registries(config)
         else:
             registries = containerd.recover(containerd_relation)
```
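Note: similarly, charm config values arrive as a union of primitive types, so the new `str(...)` calls narrow them before `.split()` or parsing. Sketch with hypothetical option values:

```python
from typing import Dict, Union

# Charm config options map to str | int | float | bool in ops,
# so values are narrowed before APIs that expect plain strings.
ConfigValue = Union[str, int, float, bool]
config: Dict[str, ConfigValue] = {
    "service-cidr": "10.152.183.0/24",              # hypothetical values
    "register-with-taints": "key=value:NoSchedule",
}

service_cidr = str(config["service-cidr"])
taints = str(config["register-with-taints"]).split()
print(service_cidr, taints)
```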
```diff
@@ -399,7 +400,7 @@ def _revoke_cluster_tokens(self, event: ops.EventBase):
         """
         log.info("Garbage collect cluster tokens")
         to_remove = None
-        if self._stored.is_dying:
+        if self._stored.is_dying is True:
             to_remove = self.unit
         elif unit := _cluster_departing_unit(event):
             to_remove = unit
```
```diff
@@ -650,7 +651,7 @@ def _evaluate_removal(self, event: ops.EventBase) -> bool:
         Returns:
             True if being removed, otherwise False
         """
-        if self._stored.is_dying:
+        if self._stored.is_dying is True:
             pass
         elif (unit := _cluster_departing_unit(event)) and unit == self.unit:
             # Juju says I am being removed
```
```diff
@@ -677,7 +678,7 @@
         elif isinstance(event, (ops.RemoveEvent, ops.StopEvent)):
             # If I myself am dying, its me!
             self._stored.is_dying = True
-        return self._stored.is_dying
+        return bool(self._stored.is_dying)

     def _is_node_present(self, node: str = "") -> bool:
         """Determine if node is in the kubernetes cluster.
```
7 changes: 3 additions & 4 deletions charms/worker/k8s/src/containerd.py
```diff
@@ -112,7 +112,7 @@ def __init__(self, *args, **kwargs):
             args: construction positional arguments
             kwargs: construction keyword arguments
         """
-        super(Registry, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         if not self.host and (host := self.url.host):
             self.host = host
```

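Note: `super(Registry, self)` → `super()` fixes pylint's `super-with-arguments` (R1725); in Python 3 the zero-argument form is equivalent. Sketch with stand-in classes:

```python
class Base:
    def __init__(self, **kwargs):
        self.fields = kwargs

class Registry(Base):
    def __init__(self, *args, **kwargs):
        # Python 3 infers the class and instance; super(Registry, self) is redundant.
        super().__init__(*args, **kwargs)

print(Registry(host="ghcr.io").fields)  # {'host': 'ghcr.io'}
```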
```diff
@@ -177,11 +177,10 @@ def auth_config_header(self) -> Dict[str, Any]:
             log.debug("Configure basic auth for %s (%s)", self.url, self.host)
             v = self.username.get_secret_value() + ":" + self.password.get_secret_value()
             return {"Authorization": "Basic " + base64.b64encode(v.encode()).decode()}
-        elif self.identitytoken:
+        if self.identitytoken:
             log.debug("Configure bearer token for %s (%s)", self.url, self.host)
             return {"Authorization": "Bearer " + self.identitytoken.get_secret_value()}
-        else:
-            return {}
+        return {}

     @property
     def hosts_toml(self) -> Dict[str, Any]:
```
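Note: flattening the `elif`/`else` answers pylint's `no-else-return` (R1705): once a branch returns, the branches after it don't need `elif`/`else`. Sketch with placeholder auth logic:

```python
def auth_header(username: str = "", token: str = "") -> dict:
    if username:
        return {"Authorization": "Basic ..."}  # placeholder value
    if token:  # was: elif token:
        return {"Authorization": "Bearer ..."}  # placeholder value
    return {}  # was: else: return {}

print(auth_header(token="abc"))
```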
7 changes: 4 additions & 3 deletions charms/worker/k8s/src/snap.py
```diff
@@ -104,8 +104,7 @@ def _parse_management_arguments() -> List[SnapArgument]:
     if not revision.exists():
         raise snap_lib.SnapError(f"Failed to find file={revision}")
     try:
-        with revision.open() as f:
-            body = yaml.safe_load(f)
+        body = yaml.safe_load(revision.read_text(encoding="utf-8"))
     except yaml.YAMLError as e:
         log.error("Failed to load file=%s, %s", revision, e)
         raise snap_lib.SnapError(f"Failed to load file={revision}")
```
```diff
@@ -117,7 +116,9 @@
         raise snap_lib.SnapError(f"Failed to find revision for arch={arch}")

     try:
-        args: List[SnapArgument] = [parse_obj_as(SnapArgument, arg) for arg in arch_spec]  # type: ignore[arg-type]
+        args: List[SnapArgument] = [
+            parse_obj_as(SnapArgument, arg) for arg in arch_spec  # type: ignore[arg-type]
+        ]
     except ValidationError as e:
         log.warning("Failed to validate args=%s (%s)", arch_spec, e)
         raise snap_lib.SnapError("Failed to validate snap args")
```
