DPE-5178 Adopt admin-address throughout #516
@@ -0,0 +1,91 @@
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

import logging

import pytest
from mysql.connector.errors import OperationalError
from pytest_operator.plugin import OpsTest

from .connector import create_db_connections
from .helpers import get_unit_ip
from .juju_ import run_action

logger = logging.getLogger(__name__)

MYSQL_APP_NAME = "mysql"
TEST_APP_NAME = "app"
CONNECTIONS = 10


@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test: OpsTest) -> None:
    """Build the charm and deploy 1 unit to ensure a cluster is formed."""
    charm = await ops_test.build_charm(".")
    config = {"profile-limit-memory": "2000", "experimental-max-connections": CONNECTIONS}

    await ops_test.model.deploy(
        charm,
        application_name=MYSQL_APP_NAME,
        config=config,
        num_units=1,
        base="[email protected]",
    )


@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_deploy_and_relate_test_app(ops_test: OpsTest) -> None:
    config = {"auto_start_writes": False, "sleep_interval": "500"}
    logger.info("Deploying test app")
    await ops_test.model.deploy(
        "mysql-test-app",
        application_name=TEST_APP_NAME,
        num_units=1,
        base="[email protected]",
        config=config,
        channel="latest/edge",
    )

    logger.info("Relating test app to mysql")
    await ops_test.model.relate(MYSQL_APP_NAME, f"{TEST_APP_NAME}:database")

    logger.info("Waiting for all units to be active")
    await ops_test.model.block_until(
        lambda: all(unit.workload_status == "active" for unit in ops_test.model.units.values()),
        timeout=60 * 10,
        wait_period=5,
    )


@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_saturate_max_connections(ops_test: OpsTest) -> None:
    app_unit = ops_test.model.applications[TEST_APP_NAME].units[0]
    mysql_unit = ops_test.model.applications[MYSQL_APP_NAME].units[0]

    host_ip = await get_unit_ip(ops_test, mysql_unit.name)
    logger.info("Running action to get app connection data")
    credentials = await run_action(app_unit, "get-client-connection-data")
    del credentials["return-code"]
    credentials["host"] = host_ip

    logger.info(f"Creating {CONNECTIONS} connections")
    connections = create_db_connections(CONNECTIONS, **credentials)
    assert isinstance(connections, list), "Connections not created"

    logger.info("Ensure all connections are established")
    for conn in connections:
        assert conn.is_connected(), "Connection failed to establish"

    assert len(connections) == CONNECTIONS, "Not all connections were established"

    logger.info("Ensure no more client connections are possible")

    with pytest.raises(OperationalError):
        # exception raised when too many connections are attempted
        create_db_connections(1, **credentials)

    logger.info("Get cluster status while connections are saturated")
    _ = await run_action(mysql_unit, "get-cluster-status")
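
Note: create_db_connections is imported from the adjacent connector module and its body is not part of this hunk. A minimal sketch of what such a helper could look like, assuming it simply opens the requested number of client connections and returns them as a list (the actual implementation may differ):

# Hypothetical sketch of the imported create_db_connections helper;
# the real implementation lives in the test suite's connector module.
from mysql.connector import connect
from mysql.connector.connection import MySQLConnection


def create_db_connections(count: int, **credentials) -> list[MySQLConnection]:
    """Open `count` client connections and return the ones that connected."""
    connections = []
    for _ in range(count):
        # connect() raises OperationalError once max connections is reached
        conn = connect(**credentials)
        if conn.is_connected():
            connections.append(conn)
    return connections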
should we also add a check that the admin port is unaffected? or at least that admin connections are active?

Getting the cluster status relies on the admin connection, so when client connections are saturated, a passing action validates exactly that. If the action fails, the admin connection is not working.
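For reference, a direct probe of the admin interface could look roughly like the sketch below. This is not part of the PR: the port value (MySQL's default admin_port is 33062) and the assumption that the supplied user holds the SERVICE_CONNECTION_ADMIN privilege needed to use the admin interface are both illustrative.

# Hypothetical sketch: verify the admin interface still accepts connections
# while client connections are saturated. Assumes the default MySQL admin
# port (33062) and a user privileged to connect to the admin interface.
import mysql.connector

ADMIN_PORT = 33062  # assumed default admin_port


def assert_admin_connection(host: str, user: str, password: str) -> None:
    """Open and close a single connection against the admin port."""
    conn = mysql.connector.connect(
        host=host, port=ADMIN_PORT, user=user, password=password
    )
    try:
        assert conn.is_connected(), "Admin connection failed to establish"
    finally:
        conn.close()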
A sleep was added to slow down the test so I could validate the connections in the database. Left here for the same reason, since the test is fairly quick.
It smells to me like a workaround for https://warthogs.atlassian.net/browse/DPE-5340 (we should send peer details only when we are ready to accept traffic).
Not a workaround for that issue, just a way to observe the test while developing.

The error on the nightly tests seems like a different issue to me, since the router is connecting to a standby cluster (and we have yet to change/discuss whether the router should connect to the database through the router to pick up leadership changes in async cases).