[DPE-5324] Increase linting rules (#740)
* Increase linting rules

* Tweak quotes

* Use literals

* Literal database

* Fix linter tweaks

* Bump timeout

* Raise on false

* Fix generation

* Restore statement

* Bump patch version

* Renamed rules

* Bump libpatch
dragomirp authored Dec 19, 2024
1 parent 7da9c8c commit 0a1c149
Showing 38 changed files with 410 additions and 385 deletions.
195 changes: 99 additions & 96 deletions lib/charms/postgresql_k8s/v0/postgresql.py

Large diffs are not rendered by default.

7 changes: 2 additions & 5 deletions lib/charms/postgresql_k8s/v0/postgresql_tls.py
@@ -45,7 +45,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version.
LIBPATCH = 10
LIBPATCH = 11

logger = logging.getLogger(__name__)
SCOPE = "unit"
@@ -82,10 +82,7 @@ def _on_set_tls_private_key(self, event: ActionEvent) -> None:

def _request_certificate(self, param: Optional[str]):
"""Request a certificate to TLS Certificates Operator."""
if param is None:
key = generate_private_key()
else:
key = self._parse_tls_file(param)
key = generate_private_key() if param is None else self._parse_tls_file(param)

csr = generate_csr(
private_key=key,
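The hunk above is Ruff's SIM108 suggestion applied: an if/else that only assigns a single variable collapses into a conditional expression. A minimal sketch of the pattern with stand-in helpers (the names are illustrative, not the charm's real functions):

from typing import Optional

def generate_private_key() -> str:
    # Stand-in for the TLS library call used by the charm.
    return "<generated key>"

def parse_tls_file(param: str) -> str:
    # Stand-in for the charm's _parse_tls_file helper.
    return f"<key parsed from {param}>"

def pick_key(param: Optional[str]) -> str:
    # Before (flagged by SIM108):
    #     if param is None:
    #         key = generate_private_key()
    #     else:
    #         key = parse_tls_file(param)
    # After: one conditional expression keeps the assignment in a single statement.
    return generate_private_key() if param is None else parse_tls_file(param)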
11 changes: 9 additions & 2 deletions pyproject.toml
@@ -111,7 +111,7 @@ line-length = 99

[tool.ruff.lint]
explicit-preview-rules = true
select = ["A", "E", "W", "F", "C", "N", "D", "I001", "CPY001"]
select = ["A", "E", "W", "F", "C", "N", "D", "I001", "B", "CPY", "RUF", "S", "SIM", "UP", "TC"]
extend-ignore = [
"D203",
"D204",
@@ -130,12 +130,19 @@ extend-ignore = [
ignore = ["E501", "D107"]

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["D100", "D101", "D102", "D103", "D104"]
"tests/*" = [
"D100", "D101", "D102", "D103", "D104",
# Asserts
"B011",
# Disable security checks for tests
"S",
]

[tool.ruff.lint.flake8-copyright]
# Check for properly formatted copyright header in each file
author = "Canonical Ltd."
notice-rgx = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+"
min-file-size = 1

[tool.ruff.lint.mccabe]
max-complexity = 10
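The widened `select` list adds flake8-bugbear (B), flake8-copyright (CPY), Ruff-specific rules (RUF), flake8-bandit security checks (S), flake8-simplify (SIM), pyupgrade (UP) and flake8-type-checking (TC) to the previously enabled families, while tests additionally ignore the assert rule (B011) and the security rules (S). A hypothetical snippet showing the kind of findings these families report; the code is illustrative, not from this repository:

import subprocess

def demo(error: Exception, path: str, debug: bool) -> str:
    if debug:
        assert False  # B011/S101: stripped under `python -O`; raise an exception instead
    with open(path, "r") as file:  # UP015: "r" is the default mode and is redundant
        data = file.read()
    subprocess.run("ls -l", shell=True)  # S602: shell=True is flagged by the security rules
    return f"failed with {str(error)}: {data}"  # RUF010: prefer the explicit !s conversion flag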
2 changes: 1 addition & 1 deletion src/arch_utils.py
@@ -33,7 +33,7 @@ def is_wrong_architecture() -> bool:
)
return False

with open(manifest_file_path, "r") as file:
with open(manifest_file_path) as file:
manifest = file.read()
hw_arch = os.uname().machine
if ("amd64" in manifest and hw_arch == "x86_64") or (
75 changes: 31 additions & 44 deletions src/backups.py
@@ -109,15 +109,16 @@ def _can_initialise_stanza(self) -> bool:
# Don't allow stanza initialisation if this unit hasn't started the database
# yet and either hasn't joined the peer relation yet or hasn't configured TLS
# yet while other unit already has TLS enabled.
if not self.charm._patroni.member_started and (
(len(self.charm._peers.data.keys()) == 2)
or (
"tls" not in self.charm.unit_peer_data
and any("tls" in unit_data for _, unit_data in self.charm._peers.data.items())
return not (
not self.charm._patroni.member_started
and (
(len(self.charm._peers.data.keys()) == 2)
or (
"tls" not in self.charm.unit_peer_data
and any("tls" in unit_data for _, unit_data in self.charm._peers.data.items())
)
)
):
return False
return True
)

def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]:
"""Validates whether this unit can perform a backup."""
@@ -182,18 +183,17 @@ def can_use_s3_repository(self) -> Tuple[bool, Optional[str]]:
])
if error != "":
raise Exception(error)
system_identifier_from_instance = [
system_identifier_from_instance = next(
line
for line in system_identifier_from_instance.splitlines()
if "Database system identifier" in line
][0].split(" ")[-1]
).split(" ")[-1]
system_identifier_from_stanza = str(stanza.get("db")[0]["system-id"])
if system_identifier_from_instance != system_identifier_from_stanza:
logger.debug(
f"can_use_s3_repository: incompatible system identifier s3={system_identifier_from_stanza}, local={system_identifier_from_instance}"
)
return False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE

return True, None

def _construct_endpoint(self, s3_parameters: Dict) -> str:
@@ -278,7 +278,7 @@ def _change_connectivity_to_database(self, connectivity: bool) -> None:
self.charm.update_config(is_creating_backup=True)

def _execute_command(
self, command: List[str], timeout: float = None, stream: bool = False
self, command: List[str], timeout: Optional[float] = None, stream: bool = False
) -> Tuple[Optional[str], Optional[str]]:
"""Execute a command in the workload container."""
try:
@@ -338,17 +338,7 @@ def _format_backup_list(self, backup_list) -> str:
path,
) in backup_list:
backups.append(
"{:<20s} | {:<19s} | {:<8s} | {:<20s} | {:<23s} | {:<20s} | {:<20s} | {:<8s} | {:s}".format(
backup_id,
backup_action,
backup_status,
reference,
lsn_start_stop,
start,
stop,
backup_timeline,
path,
)
f"{backup_id:<20s} | {backup_action:<19s} | {backup_status:<8s} | {reference:<20s} | {lsn_start_stop:<23s} | {start:<20s} | {stop:<20s} | {backup_timeline:<8s} | {path:s}"
)
return "\n".join(backups)

@@ -395,7 +385,7 @@ def _generate_backup_list_output(self) -> str:
backup_path,
))

for timeline, (timeline_stanza, timeline_id) in self._list_timelines().items():
for timeline, (_, timeline_id) in self._list_timelines().items():
backup_list.append((
timeline,
"restore",
@@ -648,7 +638,7 @@ def _is_primary_pgbackrest_service_running(self) -> bool:
try:
primary = self.charm._patroni.get_primary()
except (RetryError, ConnectionError) as e:
logger.error(f"failed to get primary with error {str(e)}")
logger.error(f"failed to get primary with error {e!s}")
return False

if primary is None:
@@ -666,7 +656,7 @@ def _is_primary_pgbackrest_service_running(self) -> bool:
])
except ExecError as e:
logger.warning(
f"Failed to contact pgBackRest TLS server on {primary_endpoint} with error {str(e)}"
f"Failed to contact pgBackRest TLS server on {primary_endpoint} with error {e!s}"
)
return False

@@ -798,7 +788,7 @@ def _on_create_backup_action(self, event) -> None: # noqa: C901
Model Name: {self.model.name}
Application Name: {self.model.app.name}
Unit Name: {self.charm.unit.name}
Juju Version: {str(juju_version)}
Juju Version: {juju_version!s}
"""
if not self._upload_content_to_s3(
metadata,
@@ -862,7 +852,7 @@ def _on_create_backup_action(self, event) -> None: # noqa: C901
f"backup/{self.stanza_name}/{backup_id}/backup.log",
s3_parameters,
)
error_message = f"Failed to backup PostgreSQL with error: {str(e)}"
error_message = f"Failed to backup PostgreSQL with error: {e!s}"
logger.error(f"Backup failed: {error_message}")
event.fail(error_message)
else:
@@ -924,7 +914,7 @@ def _on_list_backups_action(self, event) -> None:
event.set_results({"backups": formatted_list})
except ExecError as e:
logger.exception(e)
event.fail(f"Failed to list PostgreSQL backups with error: {str(e)}")
event.fail(f"Failed to list PostgreSQL backups with error: {e!s}")

def _on_restore_action(self, event): # noqa: C901
"""Request that pgBackRest restores a backup."""
@@ -943,10 +933,8 @@ def _on_restore_action(self, event): # noqa: C901
logger.info("Validating provided backup-id and restore-to-time")
backups = self._list_backups(show_failed=False)
timelines = self._list_timelines()
is_backup_id_real = backup_id and backup_id in backups.keys()
is_backup_id_timeline = (
backup_id and not is_backup_id_real and backup_id in timelines.keys()
)
is_backup_id_real = backup_id and backup_id in backups
is_backup_id_timeline = backup_id and not is_backup_id_real and backup_id in timelines
if backup_id and not is_backup_id_real and not is_backup_id_timeline:
error_message = f"Invalid backup-id: {backup_id}"
logger.error(f"Restore failed: {error_message}")
@@ -1001,7 +989,7 @@ def _on_restore_action(self, event): # noqa: C901
try:
self.container.stop(self.charm._postgresql_service)
except ChangeError as e:
error_message = f"Failed to stop database service with error: {str(e)}"
error_message = f"Failed to stop database service with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
return
@@ -1025,9 +1013,7 @@ def _on_restore_action(self, event): # noqa: C901
except ApiError as e:
# If previous PITR restore was unsuccessful, there are no such endpoints.
if "restore-to-time" not in self.charm.app_peer_data:
error_message = (
f"Failed to remove previous cluster information with error: {str(e)}"
)
error_message = f"Failed to remove previous cluster information with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
self._restart_database()
@@ -1037,7 +1023,7 @@ def _on_restore_action(self, event): # noqa: C901
try:
self._empty_data_files()
except ExecError as e:
error_message = f"Failed to remove contents of the data directory with error: {str(e)}"
error_message = f"Failed to remove contents of the data directory with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
self._restart_database()
@@ -1188,7 +1174,7 @@ def _render_pgbackrest_conf_file(self) -> bool:
)

# Open the template pgbackrest.conf file.
with open("templates/pgbackrest.conf.j2", "r") as file:
with open("templates/pgbackrest.conf.j2") as file:
template = Template(file.read())
# Render the template file with the correct values.
rendered = template.render(
@@ -1217,13 +1203,14 @@ def _render_pgbackrest_conf_file(self) -> bool:
)

# Render the logrotate configuration file.
with open("templates/pgbackrest.logrotate.j2", "r") as file:
with open("templates/pgbackrest.logrotate.j2") as file:
template = Template(file.read())
self.container.push(PGBACKREST_LOGROTATE_FILE, template.render())
self.container.push(
"/home/postgres/rotate_logs.py",
open("src/rotate_logs.py", "r").read(),
)
with open("src/rotate_logs.py") as f:
self.container.push(
"/home/postgres/rotate_logs.py",
f.read(),
)
self.container.start(self.charm.rotate_logs_service)

return True
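Most of the backups.py changes are mechanical applications of the new rule set: `str(e)` inside f-strings becomes `{e!s}` (RUF010), a `.format()` call becomes an f-string (UP032), an early return False/return True pair collapses into returning the condition (SIM103), `in ....keys()` becomes a plain `in` test (SIM118), a bare `open(...).read()` is wrapped in a `with` block (SIM115), and a single-element list comprehension indexed with `[0]` becomes `next(...)` (RUF015). A small sketch of the `next(...)` rewrite, using made-up pg_controldata-style output:

output = """pg_control version number:            1300
Database system identifier:           7321846573968542412
Database cluster state:               in production"""

# Before (flagged by RUF015): build a full list only to take its first element.
# system_id = [
#     line for line in output.splitlines() if "Database system identifier" in line
# ][0].split(" ")[-1]

# After: next() returns the first matching line without the throwaway list.
system_id = next(
    line for line in output.splitlines() if "Database system identifier" in line
).split(" ")[-1]

print(system_id)  # 7321846573968542412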
44 changes: 26 additions & 18 deletions src/charm.py
@@ -141,6 +141,10 @@
PASSWORD_USERS = [*SYSTEM_USERS, "patroni"]


class CannotConnectError(Exception):
"""Cannot run smoke check on connected Database."""


@trace_charm(
tracing_endpoint="tracing_endpoint",
extra_types=(
@@ -261,7 +265,7 @@ def _pebble_log_forwarding_supported(self) -> bool:
from ops.jujuversion import JujuVersion

juju_version = JujuVersion.from_environ()
return juju_version > JujuVersion(version=str("3.3"))
return juju_version > JujuVersion(version="3.3")

def _generate_metrics_jobs(self, enable_tls: bool) -> Dict:
"""Generate spec for Prometheus scraping."""
@@ -679,7 +683,7 @@ def _on_config_changed(self, event) -> None:
)
return

def enable_disable_extensions(self, database: str = None) -> None:
def enable_disable_extensions(self, database: Optional[str] = None) -> None:
"""Enable/disable PostgreSQL extensions set through config options.
Args:
@@ -1223,11 +1227,11 @@ def _on_set_password(self, event: ActionEvent) -> None:
other_cluster_primary = self._patroni.get_primary(
alternative_endpoints=other_cluster_endpoints
)
other_cluster_primary_ip = [
other_cluster_primary_ip = next(
replication_offer_relation.data[unit].get("private-address")
for unit in replication_offer_relation.units
if unit.name.replace("/", "-") == other_cluster_primary
][0]
)
try:
self.postgresql.update_user_password(
username, password, database_host=other_cluster_primary_ip
@@ -1417,7 +1421,7 @@ def _on_update_status(self, _) -> None:
and services[0].current != ServiceStatus.ACTIVE
):
logger.warning(
"%s pebble service inactive, restarting service" % self._postgresql_service
f"{self._postgresql_service} pebble service inactive, restarting service"
)
try:
container.restart(self._postgresql_service)
@@ -1622,7 +1626,9 @@ def _remove_from_endpoints(self, endpoints: List[str]) -> None:
self._update_endpoints(endpoints_to_remove=endpoints)

def _update_endpoints(
self, endpoint_to_add: str = None, endpoints_to_remove: List[str] = None
self,
endpoint_to_add: Optional[str] = None,
endpoints_to_remove: Optional[List[str]] = None,
) -> None:
"""Update members IPs."""
# Allow leader to reset which members are part of the cluster.
@@ -1796,7 +1802,7 @@ def _restart(self, event: RunWithLock) -> None:
for attempt in Retrying(wait=wait_fixed(3), stop=stop_after_delay(300)):
with attempt:
if not self._can_connect_to_postgresql:
assert False
raise CannotConnectError
except Exception:
logger.exception("Unable to reconnect to postgresql")

@@ -1821,7 +1827,8 @@ def _can_connect_to_postgresql(self) -> bool:
try:
for attempt in Retrying(stop=stop_after_delay(30), wait=wait_fixed(3)):
with attempt:
assert self.postgresql.get_postgresql_timezones()
if not self.postgresql.get_postgresql_timezones():
raise CannotConnectError
except RetryError:
logger.debug("Cannot connect to database")
return False
@@ -1895,16 +1902,17 @@ def update_config(self, is_creating_backup: bool = False) -> bool:
# Restart the monitoring service if the password was rotated
container = self.unit.get_container("postgresql")
current_layer = container.get_plan()
if metrics_service := current_layer.services[self._metrics_service]:
if not metrics_service.environment.get("DATA_SOURCE_NAME", "").startswith(
f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
):
container.add_layer(
self._metrics_service,
Layer({"services": {self._metrics_service: self._generate_metrics_service()}}),
combine=True,
)
container.restart(self._metrics_service)
if (
metrics_service := current_layer.services[self._metrics_service]
) and not metrics_service.environment.get("DATA_SOURCE_NAME", "").startswith(
f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
):
container.add_layer(
self._metrics_service,
Layer({"services": {self._metrics_service: self._generate_metrics_service()}}),
combine=True,
)
container.restart(self._metrics_service)

return True

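charm.py follows the same rules: implicit-Optional defaults such as `database: str = None` gain explicit `Optional[...]` annotations (RUF013), nested `if` statements are merged (SIM102), and the bare `assert` statements that drove tenacity's retry loops, which B011/S101 flag and which `python -O` strips, are replaced by raising the new `CannotConnectError`. A minimal sketch of that retry pattern, with a stand-in connectivity check instead of the charm's real Patroni/PostgreSQL calls:

from tenacity import Retrying, stop_after_delay, wait_fixed

class CannotConnectError(Exception):
    """Cannot run smoke check on connected Database."""

def can_connect() -> bool:
    # Stand-in for the charm's real connectivity check.
    return False

try:
    for attempt in Retrying(stop=stop_after_delay(3), wait=wait_fixed(1)):
        with attempt:
            if not can_connect():
                # Raising keeps the retry semantics even when asserts are optimized away.
                raise CannotConnectError
except Exception:
    print("Unable to reconnect to postgresql")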

