From 00fffe446328595332b57c0be0892245ef8322e9 Mon Sep 17 00:00:00 2001
From: Adam Zegelin
Date: Fri, 19 Jul 2019 16:10:22 -0700
Subject: [PATCH 01/19] Initial work on E2E tests.

- capture_dump.py, which spins up a multi-node CCM cluster with the
  exporter installed, and captures the metrics output from each node.
---
 test/capture_dump.py | 249 +++++++++++++++++++++++++++++++++++++++++++
 test/schema.yaml     |  31 ++++++
 test/setup.py        |  11 ++
 3 files changed, 291 insertions(+)
 create mode 100644 test/capture_dump.py
 create mode 100644 test/schema.yaml
 create mode 100644 test/setup.py

diff --git a/test/capture_dump.py b/test/capture_dump.py
new file mode 100644
index 0000000..91e8e3f
--- /dev/null
+++ b/test/capture_dump.py
@@ -0,0 +1,249 @@
+# spin up a multi-node CCM cluster with cassandra-exporter installed, apply a schema, and capture the metrics output
+
+import argparse
+import re
+import shutil
+import subprocess
+import time
+import zipfile
+
+import ccmlib.cluster
+import ccmlib.node
+import os
+from enum import Enum, auto
+from pathlib import Path
+import contextlib
+import urllib.request
+import cassandra.cluster
+import cassandra.connection
+import tempfile
+import xml.etree.ElementTree as ElementTree
+from collections import namedtuple
+
+import yaml
+
+
+def schema_yaml(path):
+    path = existing_file(path)
+
+    with open(path, 'r') as f:
+        schema_yaml = yaml.load(f, Loader=yaml.SafeLoader)
+
+    if not isinstance(schema_yaml, list):
+        raise argparse.ArgumentTypeError(f'root of the schema YAML must be a list. Got a {type(schema_yaml).__name__}.')
+
+    for i, o in enumerate(schema_yaml):
+        if not isinstance(o, str):
+            raise argparse.ArgumentTypeError(f'schema YAML must be a list of statement strings. Item {i} is a {type(o).__name__}.')
+
+    return schema_yaml
+
+
+def existing_file(path):
+    path = Path(path)
+    if not path.exists():
+        raise argparse.ArgumentTypeError(f'file "{path}" does not exist.')
+
+    if not path.is_file():
+        raise argparse.ArgumentTypeError(f'"{path}" is not a regular file.')
+
+    return path
+
+
+def cluster_directory(path):
+    path = Path(path)
+
+    if path.exists():
+        if not path.is_dir():
+            raise argparse.ArgumentTypeError(f'"{path}" must be a directory.')
+
+        if next(path.iterdir(), None) is not None:
+            raise argparse.ArgumentTypeError(f'"{path}" must be an empty directory.')
+
+    return path
+
+
+def output_directory(path):
+    path = Path(path)
+
+    if path.exists():
+        if not path.is_dir():
+            raise argparse.ArgumentTypeError(f'"{path}" must be a directory.')
+
+        # the empty directory check is done later, since it can be skipped with --overwrite-output
+
+    return path
+
+
+class ExporterJar(namedtuple('ExporterJar', ['path', 'type'])):
+    class ExporterType(Enum):
+        AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent')
+        STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application')
+
+    @classmethod
+    def from_path(cls, path):
+        path = existing_file(path)
+
+        # determine the JAR type (agent or standalone) via the Main/Premain class
+        try:
+            with zipfile.ZipFile(path) as zf:
+                manifest = zf.open('META-INF/MANIFEST.MF').readlines()
+
+                def parse_line(line):
+                    m = re.match('(.+): (.+)', line.decode("utf-8").strip())
+                    return None if m is None else m.groups()
+
+                manifest = dict(filter(None, map(parse_line, manifest)))
+
+                type = next(iter([t for t in ExporterJar.ExporterType if t.value in manifest.items()]), None)
+                if type is None:
+                    raise argparse.ArgumentTypeError(f'"{path}" is not a cassandra-exporter jar.')
+
+                return cls(path, type)
+
+        except (zipfile.BadZipFile, KeyError):
+            raise argparse.ArgumentTypeError(f'"{path}" is not a jar.')
+
+
+def default_jar_path():
+    project_dir = Path(__file__).parents[1]
+    try:
+        root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot()
+        project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text
+
+        return project_dir / f'agent/target/cassandra-exporter-agent-{project_version}.jar'
+
+    except:
+        return None
+
+
+def default_schema_path():
+    test_dir = Path(__file__).parent
+    return test_dir / "schema.yaml"
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION")
+parser.add_argument('output_directory', type=output_directory, help="location to write metrics dumps", metavar="OUTPUT_DIRECTORY")
+
+parser.add_argument('-o', '--overwrite-output', action='store_true', help="don't abort when the output directory exists or is not empty")
+
+parser.add_argument('--cluster-directory', type=cluster_directory, help="location to install Cassandra. Must be empty or not exist. (default is a temporary directory)")
+parser.add_argument('--keep-cluster-directory', type=bool, help="don't delete the cluster directory on exit")
+
+
+parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2)
+parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3)
+parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=1)
+
+parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(default_jar_path()))
+parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path()))
+
+args = parser.parse_args()
+
+
+if args.cluster_directory is None:
+    args.cluster_directory = Path(tempfile.mkdtemp()) / "cluster"
+
+if args.cluster_directory.exists():
+    args.cluster_directory.rmdir()  # CCM wants to create this
+
+
+if args.output_directory.exists() and not args.overwrite_output:
+    if next(args.output_directory.iterdir(), None) is not None:
+        raise argparse.ArgumentTypeError(f'"{args.output_directory}" must be an empty directory.')
+
+os.makedirs(args.output_directory, exist_ok=True)
+
+
+ccm_cluster = ccmlib.cluster.Cluster(
+    path=args.cluster_directory.parent,
+    name=args.cluster_directory.name,
+    version=args.cassandra_version,
+    create_directory=True  # if this is false, various config files won't be created...
+)
+
+ccm_cluster.populate(nodes=args.nodes * args.racks * args.datacenters)
+
+
+def shutdown_cluster():
+    print('Stopping cluster...')
+    ccm_cluster.stop()
+
+
+def delete_cluster_dir():
+    shutil.rmtree(args.cluster_directory)
+
+
+with contextlib.ExitStack() as defer:
+    if not args.keep_cluster_directory:
+        defer.callback(delete_cluster_dir)
+
+    defer.callback(shutdown_cluster)
+
+    for i, node in enumerate(ccm_cluster.nodelist()):
+        print(f'Configuring node {node.name}')
+
+        node.exporter_port = 9500 + i
+
+        if args.exporter_jar.type == ExporterJar.ExporterType.AGENT:
+            node.set_environment_variable('JVM_OPTS', f'-javaagent:{args.exporter_jar.path}=-l:{node.exporter_port}')
+
+        # set dc/rack manually, since CCM doesn't support custom racks
+        node.set_configuration_options({
+            'endpoint_snitch': 'GossipingPropertyFileSnitch'
+        })
+
+        rackdc_path = os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties')
+
+        node.rack_idx = (int(i / args.nodes) % args.racks) + 1
+        node.dc_idx = (int(i / (args.nodes * args.racks))) + 1
+
+        with open(rackdc_path, 'w') as f:
+            f.write(f'dc=dc{node.dc_idx}\nrack=rack{node.rack_idx}\n')
+
+    print('Starting cluster...')
+    ccm_cluster.start()
+
+    if args.exporter_jar.type == ExporterJar.ExporterType.STANDALONE:
+        print('Starting standalone exporters...')
+
+        for node in ccm_cluster.nodelist():
+            logfile = open(Path(node.get_path()) / 'logs' / 'cassandra-exporter.log', 'w')
+
+            command = ['java',
+                       '-jar', args.exporter_jar.path,
+                       '--listen', f':{node.exporter_port}',
+                       '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://localhost:{node.jmx_port}/jmxrmi',
+                       '--cql-address', f'localhost:{node.network_interfaces["binary"][1]}'
+                       ]
+            print(' '.join(map(str, command)))
+            proc = subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT)
+
+            defer.callback(proc.terminate)
+
+    print('Connecting to cluster...')
+    contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), ccm_cluster.nodelist())
+
+    cql_cluster = cassandra.cluster.Cluster(list(contact_points))
+    with cql_cluster.connect() as cql_session:
+        print('Applying schema...')
+        for stmt in args.schema:
+            print('Executing "{}"...'.format(stmt.split('\n')[0]))
+            cql_session.execute(stmt)
+
+
+
+    # the collector defers registrations by a second or two.
+    # See com.zegelin.cassandra.exporter.Harvester.defer()
+    print('Pausing to wait for deferred MBean registrations to complete...')
+    time.sleep(5)
+
+    print('Capturing metrics dump...')
+    for node in ccm_cluster.nodelist():
+        url = f'http://{node.ip_addr}:{node.exporter_port}/metrics?x-accept=text/plain'
+        destination = args.output_directory / f'{node.name}.txt'
+        urllib.request.urlretrieve(url, destination)
+        print(f'Wrote {url} to {destination}')
+
+
diff --git a/test/schema.yaml b/test/schema.yaml
new file mode 100644
index 0000000..24b20f7
--- /dev/null
+++ b/test/schema.yaml
@@ -0,0 +1,31 @@
+- >
+  CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
+- >
+  CREATE TABLE example.metric_families (
+      name text,
+      type text,
+      help text,
+      PRIMARY KEY (name)
+  )
+- >
+  CREATE INDEX ON example.metric_families (type)
+- >
+  CREATE TABLE example.numeric_metrics (
+      family text,
+      labels frozen<map<text, text>>,
+      bucket date,
+      time timestamp,
+      value float,
+
+      PRIMARY KEY ((family, labels, bucket), time)
+  )
+- >
+  CREATE MATERIALIZED VIEW example.numeric_metric_labels AS
+      SELECT family, labels, bucket, time
+      FROM example.numeric_metrics
+      WHERE family IS NOT NULL AND
+            labels IS NOT NULL AND
+            bucket IS NOT NULL AND
+            time IS NOT NULL
+      PRIMARY KEY (family, labels, bucket, time)
+
diff --git a/test/setup.py b/test/setup.py
new file mode 100644
index 0000000..569e24f
--- /dev/null
+++ b/test/setup.py
@@ -0,0 +1,11 @@
+from setuptools import setup
+
+setup(
+    name='cassandra-exporter-e2e-tests',
+    version='1.0',
+    description='End-to-end testing tools for cassandra-exporter',
+    author='Adam Zegelin',
+    author_email='adam@instaclustr.com',
+    packages=['cassandra-exporter-e2e-tests'],
+    install_requires=['ccm', 'prometheus_client', 'cassandra-driver', 'frozendict'],
+)
\ No newline at end of file

From 45ab03ce788d444ae16f6515290e3704a613fdf0 Mon Sep 17 00:00:00 2001
From: Adam Zegelin
Date: Wed, 23 Oct 2019 20:45:50 -0700
Subject: [PATCH 02/19] Additional work-in-progress for end-to-end tests.
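
A rough sketch of how the new metric_dump_tool introduced here is meant to
be driven (illustrative only; 'dump/node1.txt' is a hypothetical path to a
dump previously captured by capture_dump.py):

    from metric_dump_tool import MetricsDump, validate_dump

    dump = MetricsDump.from_file('dump/node1.txt')
    result = validate_dump(dump)

    # duplicate_families maps each duplicated family name to the list of
    # metric families sharing that name
    for name, group in result.duplicate_families.items():
        print(f'{name}: defined {len(group)} times')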
---
 test/capture_dump.py           | 233 ++++++++-------------------------
 test/debug_agent.py            |  62 +++++++++
 test/e2e_test.py               | 144 ++++++++++++++++++++
 test/e2e_test_tests.py         |  10 ++
 test/metric_dump_tool.py       | 228 ++++++++++++++++++++++++++++
 test/metric_dump_tool_tests.py | 103 +++++++++++++++
 test/utils/ccm.py              | 122 +++++++++++++++++
 test/utils/jar_utils.py        |  73 +++++++++++
 test/utils/path_utils.py       |  26 ++++
 test/utils/prometheus.py       | 165 +++++++++++++++++++++
 test/validate_metrics.py       |   1 +
 11 files changed, 990 insertions(+), 177 deletions(-)
 create mode 100644 test/debug_agent.py
 create mode 100644 test/e2e_test.py
 create mode 100644 test/e2e_test_tests.py
 create mode 100644 test/metric_dump_tool.py
 create mode 100644 test/metric_dump_tool_tests.py
 create mode 100644 test/utils/ccm.py
 create mode 100644 test/utils/jar_utils.py
 create mode 100644 test/utils/path_utils.py
 create mode 100644 test/utils/prometheus.py
 create mode 100644 test/validate_metrics.py

diff --git a/test/capture_dump.py b/test/capture_dump.py
index 91e8e3f..24d7743 100644
--- a/test/capture_dump.py
+++ b/test/capture_dump.py
@@ -1,30 +1,28 @@
 # spin up a multi-node CCM cluster with cassandra-exporter installed, apply a schema, and capture the metrics output
 
 import argparse
-import re
+import contextlib
+import os
 import shutil
 import subprocess
+import tempfile
 import time
-import zipfile
-
-import ccmlib.cluster
-import ccmlib.node
-import os
-from enum import Enum, auto
-from pathlib import Path
-import contextlib
 import urllib.request
+from pathlib import Path
+
 import cassandra.cluster
 import cassandra.connection
-import tempfile
-import xml.etree.ElementTree as ElementTree
-from collections import namedtuple
-
+import ccmlib.cluster
+import ccmlib.node
 import yaml
 
+from utils.ccm import create_ccm_cluster, TestCluster
+from utils.jar_utils import ExporterJar
+from utils.path_utils import existing_file_arg
+
 
 def schema_yaml(path):
-    path = existing_file(path)
+    path = existing_file_arg(path)
 
     with open(path, 'r') as f:
         schema_yaml = yaml.load(f, Loader=yaml.SafeLoader)
@@ -38,18 +36,6 @@ def schema_yaml(path):
 
     return schema_yaml
 
-
-def existing_file(path):
-    path = Path(path)
-    if not path.exists():
-        raise argparse.ArgumentTypeError(f'file "{path}" does not exist.')
-
-    if not path.is_file():
-        raise argparse.ArgumentTypeError(f'"{path}" is not a regular file.')
-
-    return path
-
-
 def cluster_directory(path):
     path = Path(path)
 
@@ -74,176 +60,69 @@ def output_directory(path):
 
     return path
 
-
-class ExporterJar(namedtuple('ExporterJar', ['path', 'type'])):
-    class ExporterType(Enum):
-        AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent')
-        STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application')
-
-    @classmethod
-    def from_path(cls, path):
-        path = existing_file(path)
-
-        # determine the JAR type (agent or standalone) via the Main/Premain class
-        try:
-            with zipfile.ZipFile(path) as zf:
-                manifest = zf.open('META-INF/MANIFEST.MF').readlines()
-
-                def parse_line(line):
-                    m = re.match('(.+): (.+)', line.decode("utf-8").strip())
-                    return None if m is None else m.groups()
-
-                manifest = dict(filter(None, map(parse_line, manifest)))
-
-                type = next(iter([t for t in ExporterJar.ExporterType if t.value in manifest.items()]), None)
-                if type is None:
-                    raise argparse.ArgumentTypeError(f'"{path}" is not a cassandra-exporter jar.')
-
-                return cls(path, type)
-
-        except (zipfile.BadZipFile, KeyError):
-            raise argparse.ArgumentTypeError(f'"{path}" is not a jar.')
-
-
-def default_jar_path():
-    project_dir = Path(__file__).parents[1]
-    try:
-        root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot()
-        project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text
-
-        return project_dir / f'agent/target/cassandra-exporter-agent-{project_version}.jar'
-
-    except:
-        return None
-
-
 def default_schema_path():
     test_dir = Path(__file__).parent
     return test_dir / "schema.yaml"
 
 
-parser = argparse.ArgumentParser()
-parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION")
-parser.add_argument('output_directory', type=output_directory, help="location to write metrics dumps", metavar="OUTPUT_DIRECTORY")
-
-parser.add_argument('-o', '--overwrite-output', action='store_true', help="don't abort when the output directory exists or is not empty")
-
-parser.add_argument('--cluster-directory', type=cluster_directory, help="location to install Cassandra. Must be empty or not exist. (default is a temporary directory)")
-parser.add_argument('--keep-cluster-directory', type=bool, help="don't delete the cluster directory on exit")
-
-
-parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2)
-parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3)
-parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=1)
-
-parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(default_jar_path()))
-parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path()))
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION")
+    parser.add_argument('output_directory', type=output_directory, help="location to write metrics dumps", metavar="OUTPUT_DIRECTORY")
 
-args = parser.parse_args()
+    parser.add_argument('-o', '--overwrite-output', action='store_true', help="don't abort when the output directory exists or is not empty")
+    parser.add_argument('--cluster-directory', type=cluster_directory, help="location to install Cassandra. Must be empty or not exist. (default is a temporary directory)")
(default is a temporary directory)") + parser.add_argument('--keep-cluster-directory', type=bool, help="don't delete the cluster directory on exit") + parser.add_argument('--keep-cluster-running', type=bool, help="don't stop the cluster on exit (implies --keep-cluster-directory)") -if args.cluster_directory is None: - args.cluster_directory = Path(tempfile.mkdtemp()) / "cluster" + parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2) + parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3) + parser.add_argument('-n', '--nodes', type=int, help="number of nodes (default: %(default)s)", default=6) -if args.cluster_directory.exists(): - args.cluster_directory.rmdir() # CCM wants to create this + parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path())) + parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path())) + args = parser.parse_args() -if args.output_directory.exists() and not args.overwrite_output: - if next(args.output_directory.iterdir(), None) is not None: - raise argparse.ArgumentTypeError(f'"{args.output_directory}" must be an empty directory.') + if args.cluster_directory is None: + args.cluster_directory = Path(tempfile.mkdtemp()) / "test-cluster" -os.makedirs(args.output_directory, exist_ok=True) + if args.output_directory.exists() and not args.overwrite_output: + if next(args.output_directory.iterdir(), None) is not None: + raise argparse.ArgumentTypeError(f'"{args.output_directory}" must be an empty directory.') + os.makedirs(args.output_directory, exist_ok=True) -ccm_cluster = ccmlib.cluster.Cluster( - path=args.cluster_directory.parent, - name=args.cluster_directory.name, - version=args.cassandra_version, - create_directory=True # if this is false, various config files wont be created... -) + with contextlib.ExitStack() as defer: + ccm_cluster = defer.push(TestCluster( + args.cluster_directory, args.cassandra_version + )) -ccm_cluster.populate(nodes=args.nodes * args.racks * args.datacenters) + print('Starting cluster...') + ccm_cluster.start() + print('Connecting to cluster...') + contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), ccm_cluster.nodelist()) -def shutdown_cluster(): - print('Stopping cluster...') - ccm_cluster.stop() + cql_cluster = cassandra.cluster.Cluster(list(contact_points)) + with cql_cluster.connect() as cql_session: + print('Applying schema...') + for stmt in args.schema: + print('Executing "{}"...'.format(stmt.split('\n')[0])) + cql_session.execute(stmt) + # the collector defers registrations by a second or two. 
+        # See com.zegelin.cassandra.exporter.Harvester.defer()
+        print('Pausing to wait for deferred MBean registrations to complete...')
+        time.sleep(5)
 
-def delete_cluster_dir():
-    shutil.rmtree(args.cluster_directory)
-
-
-with contextlib.ExitStack() as defer:
-    if not args.keep_cluster_directory:
-        defer.callback(delete_cluster_dir)
-
-    defer.callback(shutdown_cluster)
-
-    for i, node in enumerate(ccm_cluster.nodelist()):
-        print(f'Configuring node {node.name}')
-
-        node.exporter_port = 9500 + i
-
-        if args.exporter_jar.type == ExporterJar.ExporterType.AGENT:
-            node.set_environment_variable('JVM_OPTS', f'-javaagent:{args.exporter_jar.path}=-l:{node.exporter_port}')
-
-        # set dc/rack manually, since CCM doesn't support custom racks
-        node.set_configuration_options({
-            'endpoint_snitch': 'GossipingPropertyFileSnitch'
-        })
-
-        rackdc_path = os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties')
-
-        node.rack_idx = (int(i / args.nodes) % args.racks) + 1
-        node.dc_idx = (int(i / (args.nodes * args.racks))) + 1
-
-        with open(rackdc_path, 'w') as f:
-            f.write(f'dc=dc{node.dc_idx}\nrack=rack{node.rack_idx}\n')
-
-    print('Starting cluster...')
-    ccm_cluster.start()
-
-    if args.exporter_jar.type == ExporterJar.ExporterType.STANDALONE:
-        print('Starting standalone exporters...')
-
+        print('Capturing metrics dump...')
         for node in ccm_cluster.nodelist():
-            logfile = open(Path(node.get_path()) / 'logs' / 'cassandra-exporter.log', 'w')
-
-            command = ['java',
-                       '-jar', args.exporter_jar.path,
-                       '--listen', f':{node.exporter_port}',
-                       '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://localhost:{node.jmx_port}/jmxrmi',
-                       '--cql-address', f'localhost:{node.network_interfaces["binary"][1]}'
-                       ]
-            print(' '.join(map(str, command)))
-            proc = subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT)
-
-            defer.callback(proc.terminate)
-
-    print('Connecting to cluster...')
-    contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), ccm_cluster.nodelist())
-
-    cql_cluster = cassandra.cluster.Cluster(list(contact_points))
-    with cql_cluster.connect() as cql_session:
-        print('Applying schema...')
-        for stmt in args.schema:
-            print('Executing "{}"...'.format(stmt.split('\n')[0]))
-            cql_session.execute(stmt)
-
-
-
-    # the collector defers registrations by a second or two.
-    # See com.zegelin.cassandra.exporter.Harvester.defer()
-    print('Pausing to wait for deferred MBean registrations to complete...')
-    time.sleep(5)
-
-    print('Capturing metrics dump...')
-    for node in ccm_cluster.nodelist():
-        url = f'http://{node.ip_addr}:{node.exporter_port}/metrics?x-accept=text/plain'
-        destination = args.output_directory / f'{node.name}.txt'
-        urllib.request.urlretrieve(url, destination)
-        print(f'Wrote {url} to {destination}')
-
-
+            url = f'http://{node.ip_addr}:{node.exporter_port}/metrics?x-accept=text/plain'
+            destination = args.output_directory / f'{node.name}.txt'
+            urllib.request.urlretrieve(url, destination)
+
+            print(f'Wrote {url} to {destination}')
diff --git a/test/debug_agent.py b/test/debug_agent.py
new file mode 100644
index 0000000..38eac05
--- /dev/null
+++ b/test/debug_agent.py
@@ -0,0 +1,62 @@
+import argparse
+import os
+from pathlib import Path
+
+from ccmlib.cluster_factory import ClusterFactory
+
+from utils.ccm import create_ccm_cluster
+from utils.jar_utils import ExporterJar
+
+
+def yesno_bool(b: bool):
+    return ('n', 'y')[b]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION")
+    parser.add_argument('cluster_directory', type=Path, help="location", metavar="CLUSTER_DIRECTORY")
+
+    parser.add_argument('--jvm-debug-wait-attach', dest='jvm_debug_wait_attach', help="suspend JVM on startup and wait for debugger to attach", action='store_true')
+    parser.add_argument('--no-jvm-debug-wait-attach', dest='jvm_debug_wait_attach', help="suspend JVM on startup and wait for debugger to attach", action='store_false')
+    parser.add_argument('--jvm-debug-address', type=str, help="address/port for JVM debug agent to listen on", default='5005')
+
+    parser.add_argument('--exporter-args', type=str, help="exporter agent arguments", default='-l:9500')
+    parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path()))
+
+    parser.set_defaults(jvm_debug_wait_attach=True)
+
+    args = parser.parse_args()
+
+    print(f'Cluster directory is: {args.cluster_directory}')
+
+    if not args.cluster_directory.exists() or \
+            (args.cluster_directory.exists() and next(args.cluster_directory.iterdir(), None) is None):
+
+        # non-existent or empty directory -- new cluster
+        ccm_cluster = create_ccm_cluster(args.cluster_directory, args.cassandra_version, node_count=1)
+
+    else:
+        # existing, non-empty directory -- assume existing cluster
+        print('Loading cluster...')
+        ccm_cluster = ClusterFactory.load(args.cluster_directory.parent, args.cluster_directory.name)
+
+    node = ccm_cluster.nodelist()[0]
+    print(f'Configuring node {node.name}')
+
+    node.set_environment_variable('JVM_OPTS', f'-javaagent:{args.exporter_jar.path}={args.exporter_args} -agentlib:jdwp=transport=dt_socket,server=y,suspend={yesno_bool(args.jvm_debug_wait_attach)},address={args.jvm_debug_address}')
+
+    print(f'JVM remote debugger listening on {args.jvm_debug_address}. JVM will suspend on start.')
+    print('Starting single node cluster...')
+
+    launch_bin = node.get_launch_bin()
+    args = [launch_bin, '-f']
+    env = node.get_env()
+
+    os.execve(launch_bin, args, env)
+
+
+
+
+
+
diff --git a/test/e2e_test.py b/test/e2e_test.py
new file mode 100644
index 0000000..e612c78
--- /dev/null
+++ b/test/e2e_test.py
@@ -0,0 +1,144 @@
+# this end-to-end test does the following:
+# 1. download Prometheus (for the current platform)
+# 2. setup a multi-node Cassandra cluster with the exporter installed
+# 3. configure Prometheus it to scrape from the Cassandra nodes
+# 4. verifies that Prometheus successfully scrapes the metrics
+# 5. cleans up everything
+import argparse
+import contextlib
+import http.server
+import itertools
+import json
+import platform
+import re
+import shutil
+import socketserver
+import subprocess
+import tarfile
+import tempfile
+import threading
+import time
+import urllib.request
+import urllib.error
+from collections import namedtuple
+from pathlib import Path
+from types import SimpleNamespace
+
+import cassandra.connection
+from tqdm import tqdm
+
+from utils.ccm import create_ccm_cluster, TestCluster
+from utils.jar_utils import ExporterJar
+from utils.path_utils import nonexistent_or_empty_directory_arg
+
+import yaml
+
+from utils.prometheus import PrometheusInstance
+
+
+class DummyPrometheusHTTPHandler(http.server.BaseHTTPRequestHandler):
+    def do_GET(self):
+        if self.path != '/metrics':
+            self.send_error(404)
+
+        self.send_response(200)
+        self.end_headers()
+
+        self.wfile.write(b'# TYPE test_family gauge\n'
+                         b'test_family 123\n')
+
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION")
+
+    parser.add_argument('--working-directory', type=nonexistent_or_empty_directory_arg, help="location to install Cassandra and Prometheus. Must be empty or not exist. (default is a temporary directory)")
(default is a temporary directory)") + parser.add_argument('--keep-working-directory', help="don't delete the cluster directory on exit", action='store_true') + + parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2) + parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3) + parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=1) + + parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path())) + # parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path())) + + parser.add_argument('-x', '--prometheus-archive-url', type=str, help='Prometheus binary release archive URL (default: %(default)s)', default=PrometheusInstance.default_prometheus_archive_url()) + + args = parser.parse_args() + + if args.working_directory is None: + args.working_directory = Path(tempfile.mkdtemp()) + + def delete_working_dir(): + shutil.rmtree(args.working_directory) + + with contextlib.ExitStack() as defer: + if not args.keep_working_directory: + defer.callback(delete_working_dir) # LIFO order -- this gets called last + + print('Setting up Prometheus...') + prometheus = defer.push(PrometheusInstance( + prometheus_archive_url=args.prometheus_archive_url, + base_directory=args.working_directory / 'prometheus' + )) + + # print('Setting up Cassandra...') + # ccm_cluster = defer.push(TestCluster( + # cluster_directory=args.working_directory / 'test-cluster', + # cassandra_version=args.cassandra_version, + # node_count=1, + # delete_cluster_on_stop=not args.keep_working_directory + # )) + + + httpd = http.server.HTTPServer(("", 9500), DummyPrometheusHTTPHandler) + threading.Thread(target=httpd.serve_forever, daemon=True).start() + + httpd = http.server.HTTPServer(("", 9501), DummyPrometheusHTTPHandler) + threading.Thread(target=httpd.serve_forever, daemon=True).start() + + # prometheus.set_scrape_config('cassandra', list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) + prometheus.set_scrape_config('cassandra', ['localhost:9500', 'localhost:9501']) + prometheus.start() + + + + + + #print('Starting cluster...') + # ccm_cluster.start() + + while True: + targets = prometheus.get_targets() + + for target in targets['activeTargets']: + + break + + + for _ in tqdm(itertools.count()): + targets = prometheus.get_targets() + + cassandra_target = None + + for target in targets['activeTargets']: + if target['labels']['job'] == 'cassandra': + cassandra_target = target + break + + if cassandra_target is not None: + if cassandra_target['health'] == 'up': + break + + time.sleep(1) + + + data = prometheus.query('test_family') + pass + + + + + + diff --git a/test/e2e_test_tests.py b/test/e2e_test_tests.py new file mode 100644 index 0000000..ef5b19c --- /dev/null +++ b/test/e2e_test_tests.py @@ -0,0 +1,10 @@ +# Tests for the End-to-End Test! 
+
+import pprint
+import unittest
+from metric_dump_tool import MetricsDump
+import metric_dump_tool
+
+
+class ValidationTests(unittest.TestCase):
+    pass
\ No newline at end of file
diff --git a/test/metric_dump_tool.py b/test/metric_dump_tool.py
new file mode 100644
index 0000000..e197c99
--- /dev/null
+++ b/test/metric_dump_tool.py
@@ -0,0 +1,228 @@
+import argparse
+import io
+
+import ccmlib.cluster
+import os
+import urllib.request
+import re
+from collections import namedtuple, defaultdict, Counter
+from enum import Enum, auto, unique
+from frozendict import frozendict
+import itertools
+
+from prometheus_client.parser import text_fd_to_metric_families
+import prometheus_client.samples
+
+
+class MetricsDump(namedtuple('MetricsDump', ['path', 'metric_families'])):
+    __slots__ = ()
+
+    @classmethod
+    def from_file(cls, path):
+        with open(path, 'rt', encoding='utf-8') as fd:
+            return MetricsDump.from_fd(fd)
+
+    @classmethod
+    def from_lines(cls, lines):
+        with io.StringIO(lines) as fd:
+            return MetricsDump.from_fd(fd)
+
+    @classmethod
+    def from_fd(cls, fd):
+        def parse_lines():
+            for family in text_fd_to_metric_families(fd):
+                # freeze the labels dict so it's hashable and the keys can be used as a set
+                family.samples = [sample._replace(labels=frozendict(sample.labels)) for sample in family.samples]
+
+                yield family
+
+        metric_families = list(parse_lines())
+
+        path = ''
+        if isinstance(fd, io.BufferedReader):
+            path = fd.name
+
+        return MetricsDump(path, metric_families)
+
+
+ValidationResult = namedtuple('ValidationResult', ['duplicate_families', 'duplicate_samples'])
+DiffResult = namedtuple('DiffResult', ['added_families', 'removed_families', 'added_samples', 'removed_samples'])
+
+# patch Sample equality & hash so that only name + labels are the identity (ignore value, timestamp, etc)
+prometheus_client.samples.Sample.__eq__ = lambda self, o: (isinstance(o, prometheus_client.samples.Sample) and self.name == o.name and self.labels == o.labels)
+prometheus_client.samples.Sample.__hash__ = lambda self: hash((self.name, self.labels))
+
+
+def validate_dump(dump: MetricsDump) -> ValidationResult:
+    def find_duplicate_families():
+        def family_name_key_fn(f):
+            return f.name
+
+        families = sorted(dump.metric_families, key=family_name_key_fn)  # sort by name
+        family_groups = itertools.groupby(families, key=family_name_key_fn)  # group by name
+        family_groups = ((k, list(group)) for k, group in family_groups)  # convert groups to lists
+
+        return {name: group for name, group in family_groups if len(group) > 1}
+
+    def find_duplicate_samples():
+        samples = itertools.chain(family.samples for family in dump.metric_families)
+        #sample_groups =
+
+        return
+
+    return ValidationResult(
+        duplicate_families=find_duplicate_families(),
+        duplicate_samples=find_duplicate_samples()
+    )
+
+    # duplicate_metric_families = [key for key, value
+    #                              in Counter([metric.name for metric in families]).items()
+    #                              if value > 1]
+
+    # if len(duplicate_metric_families):
+    #     print('The following metric families are duplicated:')
+    #     for family_name in duplicate_metric_families:
+    #         print(f'\t{family_name}')
+
+
+    # # find duplicate samples
+    # for family in args.dump.metric_families:
+    #     duplicate_samples = [key for key, value
+    #                          in Counter(family.samples).items()
+    #                          if value > 1]
+    #
+    #     if len(duplicate_samples) == 0:
+    #         continue
+    #
+    #     print(f'Metric family "{family.name}" contains duplicate samples:')
+    #
+    #     for sample in duplicate_samples:
+    #         print(f'\t{sample}')
+
+
+def validate_dump_entrypoint(args):
+    result = validate_dump(args.dump)
+
+    if len(result.duplicate_families):
+        print('The following metric families are duplicated:')
+
+        for name, group in result.duplicate_families.items():
+            print(f'\t{name}')
+
+    pass
+
+
+def diff_dump(from_dump: MetricsDump, to_dump):
+    def diff_families():
+        from_families = [(metric.name, metric.type) for metric in from_dump.metric_families]
+        to_families = [(metric.name, metric.type) for metric in to_dump.metric_families]
+
+        pass
+
+    diff_families()
+
+    return DiffResult([], [], [], [])
+
+
+def diff_dump_entrypoint(args):
+    pass
+
+
+def prometheus_metrics(path):
+    try:
+        return MetricsDump.from_file(path)
+
+    except Exception as e:
+        raise argparse.ArgumentTypeError(f"error while parsing '{path}': {e}") from e
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    subparsers = parser.add_subparsers()
+
+    validate_parser = subparsers.add_parser('validate', help='validate a metrics dump for common problems')
+    validate_parser.add_argument("dump", type=prometheus_metrics, metavar="DUMP")
+    validate_parser.set_defaults(entrypoint=validate_dump_entrypoint)
+
+    diff_parser = subparsers.add_parser('diff', help='diff two metrics dumps')
+    diff_parser.add_argument("from", type=prometheus_metrics, metavar="FROM")
+    diff_parser.add_argument("to", type=prometheus_metrics, metavar="TO")
+    diff_parser.set_defaults(entrypoint=diff_dump_entrypoint)
+
+    args = parser.parse_args()
+    args.entrypoint(args)
+
+
+# def index_metrics(metrics):
+#     for metric in metrics:
+#         metric.samples = {sample.labels: sample for sample in metric.samples}
+#
+#     return {metric.name: metric for metric in metrics}
+#
+#
+# # index the metrics (convert lists to dicts) -- this removes duplicated families/samples
+# from_metric_families = index_metrics(known_metric_families)
+# to_metric_families = index_metrics(latest_metric_families)
+#
+# # find differences
+# known_names = set(known_metric_families.keys())
+# latest_names = set(latest_metric_families.keys())
+#
+# removed_names = known_names.difference(latest_names)
+# if len(removed_names):
+#     print('The following metric families no longer exist:')
+#     for name in removed_names:
+#         print(f'\t{name}')
+#
+# added_names = latest_names.difference(known_names)
+# if len(added_names):
+#     print('The following metric families are new:')
+#     for name in added_names:
+#         print(f'\t{name}')
+#
+# for name in latest_names.intersection(known_names):
+#     known_metric = known_metric_families[name]
+#     latest_metric = latest_metric_families[name]
+#
+#     known_labels = set(known_metric.samples.keys())
+#     latest_labels = set(latest_metric.samples.keys())
+#
+#     removed_labels = known_labels.difference(latest_labels)
+#     if len(removed_labels):
+#         print(f'The following samples no longer exist for metric family "{name}":')
+#         for labels in removed_labels:
+#             print(f'\t{labels}')
+#
+#     added_labels = latest_labels.difference(known_labels)
+#     if len(added_labels):
+#         print(f'The following samples are new for metric family "{name}":')
+#         for labels in added_labels:
+#             print(f'\t{labels}')
+#
+#
+#     pass
+#
+#
+# pass
+
+
+#
+# # cluster.stop()
+#
+# # cluster.set_environment_variable()
\ No newline at end of file
diff --git a/test/metric_dump_tool_tests.py b/test/metric_dump_tool_tests.py
new file mode 100644
index 0000000..2994bbb
--- /dev/null
+++ b/test/metric_dump_tool_tests.py
@@ -0,0 +1,103 @@
+import pprint
+import unittest
+from metric_dump_tool import MetricsDump
+import metric_dump_tool
+
+
+class ValidationTests(unittest.TestCase):
+    # def test_invalid_input(self):
+    #     """
+    #     Test the
+    #     """
+    #     data = """
+    #     busted busted busted
+    #     """
+    #
+    #     with self.assertRaises(ValueError):
+    #         metric_dump_tool.MetricsDump.from_lines(data)
+
+    def test_duplicate_families(self):
+        """
+        Test that validation finds duplicated metric families
+        """
+        dump = MetricsDump.from_lines("""
+# TYPE test_family_a counter
+test_family_a {} 1234 1234
+
+test_family_b {} 0 0
+
+# TYPE test_family_a gauge
+test_family_a {} 5678 1234
+
+# the following are duplicate samples, not duplicate families
+# TYPE test_family_c gauge
+test_family_c {} 1234 1234
+test_family_c {} 1234 1234
+
+# the following are duplicate families
+test_family_d {abc="123"} 0 0
+test_family_d {abc="456"} 0 0
+        """)
+
+        result = metric_dump_tool.validate_dump(dump)
+
+        self.assertIn('test_family_a', result.duplicate_families)
+        self.assertIn('test_family_d', result.duplicate_families)
+        self.assertNotIn('test_family_b', result.duplicate_families)
+        self.assertNotIn('test_family_c', result.duplicate_families)
+
+    def test_duplicate_samples(self):
+        """
+        Test that validation finds duplicated metric samples
+        """
+        dump = MetricsDump.from_lines("""
+# TYPE test_family_a gauge
+test_family_a {hello="world"} 1234 1234
+test_family_a {hello="world"} 1234 1234
+        """)
+
+        result = metric_dump_tool.validate_dump(dump)
+
+        self.assertIn('test_family_a', result.duplicate_families)
+        self.assertNotIn('test_family_b', result.duplicate_families)
+
+
+class DiffTests(unittest.TestCase):
+    def test_added_families(self):
+        from_dump = MetricsDump.from_lines("""
+test_family_a {hello="world"} 0 0
+        """)
+
+        to_dump = MetricsDump.from_lines("""
+test_family_a {hello="world"} 0 0
+test_family_a {hello="universe"} 0 0
+
+test_family_b {} 0 0
+        """)
+
+        result = metric_dump_tool.diff_dump(from_dump, to_dump)
+
+        self.assertIn('test_family_b', result.added_families)
+        self.assertNotIn('test_family_a', result.added_families)
+
+    def test_removed_families(self):
+        from_dump = MetricsDump.from_lines("""
+test_family_a {hello="world"} 0 0
+test_family_a {hello="universe"} 0 0
+
+test_family_b {} 0 0
+        """)
+
+        to_dump = MetricsDump.from_lines("""
+test_family_a {hello="world"} 0 0
+        """)
+
+        result = metric_dump_tool.diff_dump(from_dump, to_dump)
+
+        self.assertIn('test_family_b', result.removed_families)
+        self.assertNotIn('test_family_a', result.removed_families)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/utils/ccm.py b/test/utils/ccm.py
new file mode 100644
index 0000000..cc65c26
--- /dev/null
+++ b/test/utils/ccm.py
@@ -0,0 +1,122 @@
+import shutil
+import signal
+import subprocess
+from pathlib import Path
+from typing import List
+
+from ccmlib.cluster import Cluster
+
+from utils.jar_utils import ExporterJar
+
+
+class TestCluster(Cluster):
+    standalone_processes: List[subprocess.Popen] = []
+
+    def __init__(self, cluster_directory: Path, cassandra_version: str,
+                 nodes: int, racks: int, datacenters: int,
+                 exporter_jar: ExporterJar,
+                 stop_on_exit: bool = True, delete_cluster_on_stop: bool = True):
+
+        if cluster_directory.exists():
+            cluster_directory.rmdir()  # CCM wants to create this
+
+        super().__init__(
+            path=cluster_directory.parent,
+            name=cluster_directory.name,
+            version=cassandra_version,
+            create_directory=True  # if this is false, various config files won't be created...
+        )
+
+        self.stop_on_exit = stop_on_exit
+        self.delete_cluster_on_stop = delete_cluster_on_stop
+
+        self.exporter_jar = exporter_jar
+
+        self.populate(nodes, racks, datacenters)
+
+    def populate(self, nodes: int, racks: int = 1, datacenters: int = 1,
+                 debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.', ipformat=None,
+                 install_byteman=False):
+        result = super().populate(nodes, debug, tokens, use_vnodes, ipprefix, ipformat, install_byteman)
+
+        for i, node in enumerate(self.nodelist()):
+            node.exporter_port = 9500 + i
+
+            if self.exporter_jar.type == ExporterJar.ExporterType.AGENT:
+                node.set_environment_variable('JVM_OPTS', f'-javaagent:{self.exporter_jar.path}=-l:{node.exporter_port}')
+
+            # set dc/rack manually, since CCM doesn't support custom racks
+            node.set_configuration_options({
+                'endpoint_snitch': 'GossipingPropertyFileSnitch'
+            })
+
+            rackdc_path = Path(node.get_conf_dir()) / 'cassandra-rackdc.properties'
+
+            node.rack_idx = (int(i / nodes) % racks) + 1
+            node.dc_idx = (int(i / (nodes * racks))) + 1
+
+            with open(rackdc_path, 'w') as f:
+                f.write(f'dc=dc{node.dc_idx}\nrack=rack{node.rack_idx}\n')
+
+        return result
+
+    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, wait_other_notice=True, jvm_args=None,
+              profile_options=None, quiet_start=False, allow_root=False, **kwargs):
+
+        result = super().start(no_wait, verbose, wait_for_binary_proto, wait_other_notice, jvm_args, profile_options,
+                               quiet_start, allow_root, **kwargs)
+
+        # start the standalone exporters, if requested
+        if self.exporter_jar.type == ExporterJar.ExporterType.STANDALONE:
+            for node in self.nodelist():
+                process = self.exporter_jar.start_standalone(
+                    logfile_path=Path(node.get_path()) / 'logs' / 'cassandra-exporter.log',
+                    listen_address=('localhost', node.exporter_port),
+                    jmx_address=('localhost', node.jmx_port),
+                    cql_address=node.network_interfaces["binary"]
+                )
+
+                self.standalone_processes.append(process)
+
+        return result
+
+    def stop(self, wait=True, signal_event=signal.SIGTERM, **kwargs):
+        result = super().stop(wait, signal_event, **kwargs)
+
+        # shutdown standalone exporters, if they're still running
+        for p in self.standalone_processes:
+            p.terminate()
+
+            if wait:
+                p.wait()
+
+        if self.delete_cluster_on_stop:
+            shutil.rmtree(self.get_path())
+
+        return result
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.stop_on_exit:
+            self.stop()
+
+
+def create_ccm_cluster(cluster_directory: Path, cassandra_version: str, node_count: int):
+    if cluster_directory.exists():
+        cluster_directory.rmdir()  # CCM wants to create this
+
+    print('Creating cluster...')
+    ccm_cluster = Cluster(
+        path=cluster_directory.parent,
+        name=cluster_directory.name,
+        version=cassandra_version,
+        create_directory=True  # if this is false, various config files won't be created...
+    )
+
+    ccm_cluster.populate(nodes=node_count)
+
+    return ccm_cluster
+
diff --git a/test/utils/jar_utils.py b/test/utils/jar_utils.py
new file mode 100644
index 0000000..e3990b4
--- /dev/null
+++ b/test/utils/jar_utils.py
@@ -0,0 +1,73 @@
+import argparse
+import re
+import subprocess
+import zipfile
+from collections import namedtuple
+from enum import Enum
+from pathlib import Path
+from xml.etree import ElementTree
+
+from utils.path_utils import existing_file_arg
+
+
+class ExporterJar(namedtuple('ExporterJar', ['path', 'type'])):
+    class ExporterType(Enum):
+        AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent')
+        STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application')
+
+    @classmethod
+    def from_path(cls, path):
+        path = existing_file_arg(path)
+
+        # determine the JAR type (agent or standalone) via the Main/Premain class
+        try:
+            with zipfile.ZipFile(path) as zf:
+                manifest = zf.open('META-INF/MANIFEST.MF').readlines()
+
+                def parse_line(line):
+                    m = re.match('(.+): (.+)', line.decode("utf-8").strip())
+                    return None if m is None else m.groups()
+
+                manifest = dict(filter(None, map(parse_line, manifest)))
+
+                type = next(iter([t for t in ExporterJar.ExporterType if t.value in manifest.items()]), None)
+                if type is None:
+                    raise argparse.ArgumentTypeError(f'"{path}" is not a cassandra-exporter jar.')
+
+                return cls(path, type)
+
+        except (zipfile.BadZipFile, KeyError):
+            raise argparse.ArgumentTypeError(f'"{path}" is not a jar.')
+
+    def start_standalone(self, listen_address: (str, int),
+                         jmx_address: (str, int),
+                         cql_address: (str, int),
+                         logfile_path: Path):
+
+        logfile = logfile_path.open('w')
+
+        def addr_str(address: (str, int)):
+            return ':'.join(map(str, address))
+
+        command = ['java',
+                   '-jar', self.path,
+                   '--listen', addr_str(listen_address),
+                   '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://{addr_str(jmx_address)}/jmxrmi',
+                   '--cql-address', addr_str(cql_address)
+                   ]
+        print(' '.join(map(str, command)))
+        return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT)
+
+    @staticmethod
+    def default_jar_path():
+        project_dir = Path(__file__).parents[2]
+        try:
+            root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot()
+            project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text
+
+            return project_dir / f'agent/target/cassandra-exporter-agent-{project_version}.jar'
+
+        except:
+            return None
+
diff --git a/test/utils/path_utils.py b/test/utils/path_utils.py
new file mode 100644
index 0000000..5bc571a
--- /dev/null
+++ b/test/utils/path_utils.py
@@ -0,0 +1,26 @@
+import argparse
+from pathlib import Path
+
+
+def existing_file_arg(path):
+    path = Path(path)
+    if not path.exists():
+        raise argparse.ArgumentTypeError(f'file "{path}" does not exist.')
+
+    if not path.is_file():
+        raise argparse.ArgumentTypeError(f'"{path}" is not a regular file.')
+
+    return path
+
+
+def nonexistent_or_empty_directory_arg(path):
+    path = Path(path)
+
+    if path.exists():
+        if not path.is_dir():
+            raise argparse.ArgumentTypeError(f'"{path}" must be a directory.')
+
+        if next(path.iterdir(), None) is not None:
+            raise argparse.ArgumentTypeError(f'"{path}" must be an empty directory.')
+
+    return path
\ No newline at end of file
diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py
new file mode 100644
index 0000000..7ab8026
--- /dev/null
+++ b/test/utils/prometheus.py
@@ -0,0 +1,165 @@
+import json
+import platform
+import re
+import subprocess
+import tarfile
+import time
+import urllib.request
+import urllib.error
+from contextlib import contextmanager
+from enum import Enum, auto
+from pathlib import Path
+from typing import List
+
+import yaml
+from tqdm import tqdm
+
+
+class _TqdmIOStream(object):
+    def __init__(self, stream, t):
+        self._stream = stream
+        self._t = t
+
+    def read(self, size):
+        buf = self._stream.read(size)
+        self._t.update(len(buf))
+        return buf
+
+    def __enter__(self, *args, **kwargs):
+        self._stream.__enter__(*args, **kwargs)
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self._stream.__exit__(*args, **kwargs)
+
+    def __getattr__(self, attr):
+        return getattr(self._stream, attr)
+
+
+class PrometheusInstance(object):
+    prometheus_directory: Path = None
+    prometheus_process: subprocess.Popen = None
+
+    @staticmethod
+    def default_prometheus_archive_url():
+        try:
+            def architecture_str():
+                machine_aliases = {
+                    'x86_64': 'amd64'
+                }
+
+                machine = platform.machine()
+                machine = machine_aliases.get(machine, machine)
+
+                system = platform.system().lower()
+
+                return f'{system}-{machine}'
+
+            asset_pattern = re.compile(r'prometheus-.+\.' + architecture_str() + r'\.tar\..+')
+
+            with urllib.request.urlopen('https://api.github.com/repos/prometheus/prometheus/releases/latest') as response:
+                release_info = json.load(response)
+
+            for asset in release_info['assets']:
+                if asset_pattern.fullmatch(asset['name']) is not None:
+                    return asset['browser_download_url']
+
+        except:
+            pass
+
+        return None
+
+    def __init__(self, prometheus_archive_url: str, base_directory: Path, listen_address='localhost:9090'):
+        self.prometheus_directory = self._download_prometheus(prometheus_archive_url, base_directory)
+
+        self.listen_address = listen_address
+
+    def start(self, wait=True):
+        self.prometheus_process = subprocess.Popen(
+            args=[str(self.prometheus_directory / 'prometheus'),
+                  f'--web.listen-address={self.listen_address}'],
+            cwd=str(self.prometheus_directory)
+        )
+
+        if wait:
+            while not self.is_ready():
+                time.sleep(1)
+
+    def stop(self):
+        if self.prometheus_process is not None:
+            self.prometheus_process.terminate()
+
+    @contextmanager
+    def _modify_config(self):
+        config_file_path = self.prometheus_directory / 'prometheus.yml'
+
+        with config_file_path.open('r+') as stream:
+            config = yaml.load(stream)
+
+            yield config
+
+            stream.seek(0)
+            yaml.dump(config, stream)
+            stream.truncate()
+
+    def set_scrape_config(self, job_name: str, static_targets: List[str]):
+        with self._modify_config() as config:
+            config['scrape_configs'] = [{
+                'job_name': job_name,
+                'static_configs': [{
+                    'targets': static_targets
+                }]
+            }]
+
+    def is_ready(self):
+        try:
+            with urllib.request.urlopen(f'http://{self.listen_address}/-/ready') as response:
+                return response.status == 200
+
+        except urllib.error.URLError as e:
+            if isinstance(e.reason, ConnectionRefusedError):
+                return False
+
+            raise e
+
+    def _api_call(self, path):
+        with urllib.request.urlopen(f'http://{self.listen_address}{path}') as response:
+            response_envelope = json.load(response)
+
+            if response_envelope['status'] != 'success':
+                raise Exception(response.url, response.status, response_envelope)
+
+            return response_envelope['data']
+
+    def get_targets(self):
+        return self._api_call('/api/v1/targets')
+
+    def query(self, q):
+        return self._api_call(f'/api/v1/query?query={q}')
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.stop()
+
+        if self.prometheus_process is not None:
+            self.prometheus_process.__exit__(exc_type, exc_val, exc_tb)
+
+    @staticmethod
+    def _download_prometheus(archive_url: str, destination: Path):
+        print(f'Downloading {archive_url} to {destination}...')
+
+        archive_roots = set()
+
+        with urllib.request.urlopen(archive_url) as response:
+            with tqdm(total=int(response.headers.get('Content-length')), unit='bytes', unit_scale=True, miniters=1) as t:
+                with tarfile.open(fileobj=_TqdmIOStream(response, t), mode='r|*') as archive:
+                    for member in archive:
+                        t.set_postfix(file=member.name)
+
+                        archive_roots.add(Path(member.name).parts[0])
+
+                        archive.extract(member, destination)
+
+        return destination / next(iter(archive_roots))
\ No newline at end of file
diff --git a/test/validate_metrics.py b/test/validate_metrics.py
new file mode 100644
index 0000000..0748735
--- /dev/null
+++ b/test/validate_metrics.py
@@ -0,0 +1 @@
+# spin up a cluster and compare a metrics dump from the current exporter build against a previous known-good dump
\ No newline at end of file

From cca317236f043eacf8c31c43fc10099fb5fc6b7e Mon Sep 17 00:00:00 2001
From: Adam Zegelin
Date: Tue, 12 Nov 2019 20:45:21 -0800
Subject: [PATCH 03/19] E2E tests work in progress.

---
 test/capture_dump.py |  6 +-----
 test/e2e_test.py     | 30 +++++++++++++++---------------
 2 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/test/capture_dump.py b/test/capture_dump.py
index 24d7743..0e67ecf 100644
--- a/test/capture_dump.py
+++ b/test/capture_dump.py
@@ -3,8 +3,6 @@
 import argparse
 import contextlib
 import os
-import shutil
-import subprocess
 import tempfile
 import time
 import urllib.request
@@ -12,11 +10,9 @@
 
 import cassandra.cluster
 import cassandra.connection
-import ccmlib.cluster
-import ccmlib.node
 import yaml
 
-from utils.ccm import create_ccm_cluster, TestCluster
+from utils.ccm import TestCluster
 from utils.jar_utils import ExporterJar
 from utils.path_utils import existing_file_arg
 
diff --git a/test/e2e_test.py b/test/e2e_test.py
index e612c78..5293b2e 100644
--- a/test/e2e_test.py
+++ b/test/e2e_test.py
@@ -1,9 +1,10 @@
 # this end-to-end test does the following:
 # 1. download Prometheus (for the current platform)
 # 2. setup a multi-node Cassandra cluster with the exporter installed
-# 3. configure Prometheus it to scrape from the Cassandra nodes
+# 3. configure Prometheus to scrape from the Cassandra nodes
 # 4. verifies that Prometheus successfully scrapes the metrics
 # 5. cleans up everything
+
 import argparse
 import contextlib
 import http.server
@@ -113,28 +114,27 @@ def delete_working_dir():
             targets = prometheus.get_targets()
 
             for target in targets['activeTargets']:
-
-                break
+                break
 
-        for _ in tqdm(itertools.count()):
-            targets = prometheus.get_targets()
+            for _ in tqdm(itertools.count()):
+                targets = prometheus.get_targets()
 
-            cassandra_target = None
+                cassandra_target = None
 
-            for target in targets['activeTargets']:
-                if target['labels']['job'] == 'cassandra':
-                    cassandra_target = target
-                    break
+                for target in targets['activeTargets']:
+                    if target['labels']['job'] == 'cassandra':
+                        cassandra_target = target
+                        break
 
-            if cassandra_target is not None:
-                if cassandra_target['health'] == 'up':
-                    break
+                if cassandra_target is not None:
+                    if cassandra_target['health'] == 'up':
+                        break
 
-            time.sleep(1)
+                time.sleep(1)
 
-        data = prometheus.query('test_family')
+            data = prometheus.query('test_family')
         pass

From 5a226e47fa201b570a9510cde650cab27240fd3e Mon Sep 17 00:00:00 2001
From: Adam Zegelin
Date: Tue, 3 Dec 2019 23:37:02 -0800
Subject: [PATCH 04/19] Improved e2e tests.
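
A rough sketch of the intended flow through the utils.prometheus helper
after this change, using the names this patch introduces (the argument
values below are placeholders, not defaults):

    with PrometheusInstance(archive=archive, working_directory=working_directory) as prometheus:
        # scrape every exporter endpoint exposed by the CCM test cluster
        prometheus.set_scrape_config('cassandra', [f'localhost:{n.exporter_port}' for n in ccm_cluster.nodelist()])
        prometheus.start()

        targets = prometheus.get_targets()        # GET /api/v1/targets
        result = prometheus.query('some_metric')  # GET /api/v1/query?query=...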
---
 test/capture_dump.py     |  27 +++-------
 test/e2e_test.py         | 102 +++++++++++++++++++++-----------------
 test/setup.py            |   2 +-
 test/utils/ccm.py        |   9 ++++
 test/utils/jar_utils.py  |  27 +++++++---
 test/utils/prometheus.py | 103 ++++++++++++++++++++++----------------
 test/utils/schema.py     |  35 +++++++++++++
 7 files changed, 194 insertions(+), 111 deletions(-)
 create mode 100644 test/utils/schema.py

diff --git a/test/capture_dump.py b/test/capture_dump.py
index 0e67ecf..78e50f3 100644
--- a/test/capture_dump.py
+++ b/test/capture_dump.py
@@ -10,28 +10,14 @@
 
 import cassandra.cluster
 import cassandra.connection
-import yaml
+
 
 from utils.ccm import TestCluster
 from utils.jar_utils import ExporterJar
 from utils.path_utils import existing_file_arg
+from utils.schema import CqlSchema
 
 
-def schema_yaml(path):
-    path = existing_file_arg(path)
-
-    with open(path, 'r') as f:
-        schema_yaml = yaml.load(f, Loader=yaml.SafeLoader)
-
-    if not isinstance(schema_yaml, list):
-        raise argparse.ArgumentTypeError(f'root of the schema YAML must be a list. Got a {type(schema_yaml).__name__}.')
-
-    for i, o in enumerate(schema_yaml):
-        if not isinstance(o, str):
-            raise argparse.ArgumentTypeError(f'schema YAML must be a list of statement strings. Item {i} is a {type(o).__name__}.')
-
-    return schema_yaml
-
 def cluster_directory(path):
     path = Path(path)
 
@@ -56,9 +42,7 @@ def output_directory(path):
 
     return path
 
-def default_schema_path():
-    test_dir = Path(__file__).parent
-    return test_dir / "schema.yaml"
+
 
 
 if __name__ == '__main__':
@@ -77,7 +61,7 @@
     parser.add_argument('-n', '--nodes', type=int, help="number of nodes (default: %(default)s)", default=6)
 
     parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path()))
-    parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path()))
+    parser.add_argument('-s', '--schema', type=CqlSchema.from_path, help="CQL schema to apply (default: %(default)s)", default=str(CqlSchema.default_schema_path()))
 
     args = parser.parse_args()
 
@@ -98,6 +82,9 @@
         print('Starting cluster...')
         ccm_cluster.start()
 
+        print('Applying schema...')
+        ccm_cluster.apply_schema()
+
         print('Connecting to cluster...')
         contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), ccm_cluster.nodelist())
 
diff --git a/test/e2e_test.py b/test/e2e_test.py
index 5293b2e..cb5d259 100644
--- a/test/e2e_test.py
+++ b/test/e2e_test.py
@@ -11,6 +11,7 @@
 import itertools
 import json
 import platform
+import random
 import re
 import shutil
 import socketserver
@@ -21,12 +22,12 @@
 import time
 import urllib.request
 import urllib.error
-from collections import namedtuple
+from collections import namedtuple, defaultdict
 from pathlib import Path
 from types import SimpleNamespace
 
 import cassandra.connection
-from tqdm import tqdm
+from frozendict import frozendict
 
 from utils.ccm import create_ccm_cluster, TestCluster
 from utils.jar_utils import ExporterJar
@@ -34,7 +35,8 @@
 
 import yaml
 
-from utils.prometheus import PrometheusInstance
+from utils.prometheus import PrometheusInstance, PrometheusArchive
+from utils.schema import CqlSchema
 
 
 class DummyPrometheusHTTPHandler(http.server.BaseHTTPRequestHandler):
@@ -45,26 +47,30 @@ def do_GET(self):
         self.send_response(200)
         self.end_headers()
 
-        self.wfile.write(b'# TYPE test_family gauge\n'
test_family gauge\n' - b'test_family 123\n') + #if self.server.server_port == 9500: + if random.choice([True, False]): + self.wfile.write(b'# TYPE test_family gauge\n' + b'test_family 123\n') + else: + self.wfile.write(b'# TYPE test_family gauge\n' + b'test_family123\n') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION") - parser.add_argument('--working-directory', type=nonexistent_or_empty_directory_arg, help="location to install Cassandra and Prometheus. Must be empty or not exist. (default is a temporary directory)") + parser.add_argument('-C', '--working-directory', type=nonexistent_or_empty_directory_arg, help="location to install Cassandra and Prometheus. Must be empty or not exist. (default is a temporary directory)") parser.add_argument('--keep-working-directory', help="don't delete the cluster directory on exit", action='store_true') parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2) parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3) - parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=1) + parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=3) - parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path())) - # parser.add_argument('-s', '--schema', type=schema_yaml, help="CQL schema to apply (default: %(default)s)", default=str(default_schema_path())) - - parser.add_argument('-x', '--prometheus-archive-url', type=str, help='Prometheus binary release archive URL (default: %(default)s)', default=PrometheusInstance.default_prometheus_archive_url()) + ExporterJar.add_jar_argument('--exporter-jar', parser) + CqlSchema.add_schema_argument('--schema', parser) + PrometheusArchive.add_archive_argument('--prometheus-archive', parser) args = parser.parse_args() @@ -80,62 +86,70 @@ def delete_working_dir(): print('Setting up Prometheus...') prometheus = defer.push(PrometheusInstance( - prometheus_archive_url=args.prometheus_archive_url, - base_directory=args.working_directory / 'prometheus' + archive=args.prometheus_archive, + working_directory=args.working_directory )) - # print('Setting up Cassandra...') - # ccm_cluster = defer.push(TestCluster( - # cluster_directory=args.working_directory / 'test-cluster', - # cassandra_version=args.cassandra_version, - # node_count=1, - # delete_cluster_on_stop=not args.keep_working_directory - # )) - + print('Setting up Cassandra...') + ccm_cluster = defer.push(TestCluster( + cluster_directory=args.working_directory / 'test-cluster', + cassandra_version=args.cassandra_version, + exporter_jar=args.exporter_jar, + nodes=args.nodes, racks=args.racks, datacenters=args.datacenters, + delete_cluster_on_stop=not args.keep_working_directory, + )) - httpd = http.server.HTTPServer(("", 9500), DummyPrometheusHTTPHandler) - threading.Thread(target=httpd.serve_forever, daemon=True).start() - httpd = http.server.HTTPServer(("", 9501), DummyPrometheusHTTPHandler) - threading.Thread(target=httpd.serve_forever, daemon=True).start() + # httpd = http.server.HTTPServer(("", 9500), DummyPrometheusHTTPHandler) + # 
threading.Thread(target=httpd.serve_forever, daemon=True).start() + # + # httpd = http.server.HTTPServer(("", 9501), DummyPrometheusHTTPHandler) + # threading.Thread(target=httpd.serve_forever, daemon=True).start() - # prometheus.set_scrape_config('cassandra', list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) - prometheus.set_scrape_config('cassandra', ['localhost:9500', 'localhost:9501']) + prometheus.set_scrape_config('cassandra', list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) + # prometheus.set_scrape_config('cassandra', ['localhost:9500', 'localhost:9501']) prometheus.start() - #print('Starting cluster...') - # ccm_cluster.start() + print('Starting cluster...') + ccm_cluster.start() + + print('Pausing to wait for deferred MBean registrations to complete...') + time.sleep(5) + + + target_histories = defaultdict(dict) while True: targets = prometheus.get_targets() - for target in targets['activeTargets']: - break + if len(targets['activeTargets']) > 0: + for target in targets['activeTargets']: + labels = frozendict(target['labels']) + # even if the target health is unknown, ensure the key exists so the length check below + # is aware of the target + history = target_histories[labels] - for _ in tqdm(itertools.count()): - targets = prometheus.get_targets() + if target['health'] == 'unknown': + continue - cassandra_target = None + history[target['lastScrape']] = (target['health'], target['lastError']) - for target in targets['activeTargets']: - if target['labels']['job'] == 'cassandra': - cassandra_target = target - break + if all([len(v) >= 5 for v in target_histories.values()]): + break - if cassandra_target is not None: - if cassandra_target['health'] == 'up': - break + time.sleep(1) - time.sleep(1) + x = dict((target, history) for target, history in target_histories.items() + if any([health != 'up' for (health, error) in history.values()])) + if len(x): + print(x) - data = prometheus.query('test_family') - pass diff --git a/test/setup.py b/test/setup.py index 569e24f..32f4013 100644 --- a/test/setup.py +++ b/test/setup.py @@ -7,5 +7,5 @@ author='Adam Zegelin', author_email='adam@instaclustr.com', packages=['cassandra-exporter-e2e-tests'], - install_requires=['ccm', 'prometheus_client', 'cassandra-driver', 'frozendict'], + install_requires=['ccm', 'prometheus_client', 'cassandra-driver', 'frozendict', 'pyyaml', 'tqdm'], ) \ No newline at end of file diff --git a/test/utils/ccm.py b/test/utils/ccm.py index cc65c26..4e8f0a7 100644 --- a/test/utils/ccm.py +++ b/test/utils/ccm.py @@ -7,6 +7,7 @@ from ccmlib.cluster import Cluster from utils.jar_utils import ExporterJar +from utils.schema import CqlSchema class TestCluster(Cluster): @@ -95,6 +96,14 @@ def stop(self, wait=True, signal_event=signal.SIGTERM, **kwargs): return result + def apply_schema(self, schema: CqlSchema): + cql_cluster = cassandra.cluster.Cluster(list(contact_points)) + with cql_cluster.connect() as cql_session: + print('Applying schema...') + for stmt in args.schema: + print('Executing "{}"...'.format(stmt.split('\n')[0])) + cql_session.execute(stmt) + def __enter__(self): return self diff --git a/test/utils/jar_utils.py b/test/utils/jar_utils.py index e3990b4..767ca5e 100644 --- a/test/utils/jar_utils.py +++ b/test/utils/jar_utils.py @@ -1,4 +1,5 @@ import argparse +import logging import re import subprocess import zipfile @@ -11,6 +12,8 @@ class ExporterJar(namedtuple('ExporterJar', ['path', 'type'])): + logger = logging.getLogger(f'{__name__}.{__qualname__}') + 
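+    # NOTE: each enum value below is a (manifest attribute, class name) pair; from_path()
+    # matches these against META-INF/MANIFEST.MF to tell agent and standalone jars apart.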
class ExporterType(Enum): AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent') STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application') @@ -61,13 +64,25 @@ def addr_str(address: (str, int)): @staticmethod def default_jar_path(): project_dir = Path(__file__).parents[2] - try: - root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot() - project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text - return project_dir / f'agent/target/cassandra-exporter-agent-{project_version}.jar' + root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot() + project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text + + return project_dir / f'agent/target/cassandra-exporter-agent-{project_version}.jar' + + @classmethod + def add_jar_argument(cls, name, parser): + try: + default_path = ExporterJar.default_jar_path() + default_help = '(default: %(default)s)' - except: - return None + except Exception as e: + cls.logger.warning('failed to locate default exporter Jar', exc_info=True) + default_path = None + default_help = f'(default: failed to locate default exporter Jar: {e})' + parser.add_argument(name, type=ExporterJar.from_path, + help="location of the cassandra-exporter Jar, either agent or standalone " + default_help, + required=default_path is None, + default=str(default_path)) diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py index 7ab8026..d0cdebb 100644 --- a/test/utils/prometheus.py +++ b/test/utils/prometheus.py @@ -6,6 +6,7 @@ import time import urllib.request import urllib.error +from collections import namedtuple from contextlib import contextmanager from enum import Enum, auto from pathlib import Path @@ -14,6 +15,10 @@ import yaml from tqdm import tqdm +import logging + +from utils.log_mixin import LogMixin + class _TqdmIOStream(object): def __init__(self, stream, t): @@ -36,42 +41,74 @@ def __getattr__(self, attr): return getattr(self._stream, attr) -class PrometheusInstance(object): - prometheus_directory: Path = None - prometheus_process: subprocess.Popen = None - @staticmethod - def default_prometheus_archive_url(): +class PrometheusArchive(namedtuple('PrometheusArchive', ['url'])): + logger = logging.getLogger(f'{__name__}.{__qualname__}') + + @classmethod + def default_prometheus_archive_url(cls): + def architecture_str(): + machine_aliases = { + 'x86_64': 'amd64' + } + + machine = platform.machine() + machine = machine_aliases.get(machine, machine) + + system = platform.system().lower() + + return f'{system}-{machine}' + + asset_pattern = re.compile(r'prometheus-.+\.' 
+ architecture_str() + '\.tar\..+') + + with urllib.request.urlopen('https://api.github.com/repos/prometheus/prometheus/releases/latest') as response: + release_info = json.load(response) + + for asset in release_info['assets']: + if asset_pattern.fullmatch(asset['name']) is not None: + return asset['browser_download_url'] + + @classmethod + def add_archive_argument(cls, name, parser): try: - def architecture_str(): - machine_aliases = { - 'x86_64': 'amd64' - } + default_url = PrometheusArchive.default_prometheus_archive_url() + default_help = '(default: %(default)s)' - machine = platform.machine() - machine = machine_aliases.get(machine, machine) + except Exception as e: + cls.logger.warning('failed to determine Prometheus archive URL', exc_info=True) - system = platform.system().lower() + default_url = None + default_help = f'(default: failed to determine archive URL)' - return f'{system}-{machine}' + parser.add_argument(name, type=PrometheusArchive, + help="Prometheus binary release archive (tar, tar+gz, tar+bzip2) URL (schemes: http, https, file) " + default_help, + required=default_url is None, + default=str(default_url)) - asset_pattern = re.compile(r'prometheus-.+\.' + architecture_str() + '\.tar\..+') + def download(self, destination: Path): + print(f'Downloading {self.url} to {destination}...') - with urllib.request.urlopen('https://api.github.com/repos/prometheus/prometheus/releases/latest') as response: - release_info = json.load(response) + archive_roots = set() - for asset in release_info['assets']: - if asset_pattern.fullmatch(asset['name']) is not None: - return asset['browser_download_url'] + with urllib.request.urlopen(self.url) as response: + with tqdm(total=int(response.headers.get('Content-length')), unit='bytes', unit_scale=True, miniters=1) as t: + with tarfile.open(fileobj=_TqdmIOStream(response, t), mode='r|*') as archive: + for member in archive: + t.set_postfix(file=member.name) - except: - pass + archive_roots.add(Path(member.name).parts[0]) + + archive.extract(member, destination) - return None + return destination / next(iter(archive_roots)) - def __init__(self, prometheus_archive_url: str, base_directory: Path, listen_address='localhost:9090'): - self.prometheus_directory = self._download_prometheus(prometheus_archive_url, base_directory) +class PrometheusInstance(object): + prometheus_directory: Path = None + prometheus_process: subprocess.Popen = None + + def __init__(self, archive: PrometheusArchive, working_directory: Path, listen_address='localhost:9090'): + self.prometheus_directory = archive.download(working_directory) self.listen_address = listen_address def start(self, wait=True): @@ -105,7 +142,8 @@ def _modify_config(self): def set_scrape_config(self, job_name: str, static_targets: List[str]): with self._modify_config() as config: config['scrape_configs'] = [{ - 'job_name': 'cassandra', + 'job_name': job_name, + 'scrape_interval': '1s', 'static_configs': [{ 'targets': static_targets }] @@ -146,20 +184,5 @@ def __exit__(self, exc_type, exc_val, exc_tb): if self.prometheus_process is not None: self.prometheus_process.__exit__(exc_type, exc_val, exc_tb) - @staticmethod - def _download_prometheus(archive_url: str, destination: Path): - print(f'Downloading {archive_url} to {destination}...') - - archive_roots = set() - with urllib.request.urlopen(archive_url) as response: - with tqdm(total=int(response.headers.get('Content-length')), unit='bytes', unit_scale=True, miniters=1) as t: - with tarfile.open(fileobj=_TqdmIOStream(response, t), mode='r|*') as 
archive: - for member in archive: - t.set_postfix(file=member.name) - - archive_roots.add(Path(member.name).parts[0]) - - archive.extract(member, destination) - return destination / next(iter(archive_roots)) \ No newline at end of file diff --git a/test/utils/schema.py b/test/utils/schema.py new file mode 100644 index 0000000..4bf8e07 --- /dev/null +++ b/test/utils/schema.py @@ -0,0 +1,35 @@ +import argparse +import yaml +from pathlib import Path +from collections import namedtuple + +from utils.path_utils import existing_file_arg + + +class CqlSchema(namedtuple('CqlSchema', ['path', 'statements'])): + @classmethod + def from_path(cls, path): + path = existing_file_arg(path) + + with open(path, 'r') as f: + schema = yaml.load(f, Loader=yaml.SafeLoader) + + if not isinstance(schema, list): + raise argparse.ArgumentTypeError(f'root of the schema YAML must be a list. Got a {type(schema).__name__}.') + + for i, o in enumerate(schema): + if not isinstance(o, str): + raise argparse.ArgumentTypeError(f'schema YAML must be a list of statement strings. Item {i} is a {type(o).__name__}.') + + return cls(path, schema) + + @staticmethod + def default_schema_path(): + test_dir = Path(__file__).parents[1] + return test_dir / "schema.yaml" + + @staticmethod + def add_schema_argument(name, parser): + parser.add_argument(name, type=CqlSchema.from_path, + help="CQL schema to apply (default: %(default)s)", + default=str(CqlSchema.default_schema_path())) From 1e43d2146b7670fc3587d25de24f7a59b5d8e64a Mon Sep 17 00:00:00 2001 From: Adam Zegelin Date: Wed, 8 Jan 2020 19:08:32 -0800 Subject: [PATCH 05/19] Tests WIP. --- test/capture_dump.py | 46 ++++++++-------------- test/debug_agent.py | 29 ++++++++++++-- test/e2e_test.py | 83 +++++++++++++++++----------------------- test/utils/ccm.py | 44 ++++++++++----------- test/utils/prometheus.py | 2 +- 5 files changed, 98 insertions(+), 106 deletions(-) diff --git a/test/capture_dump.py b/test/capture_dump.py index 78e50f3..8e0302d 100644 --- a/test/capture_dump.py +++ b/test/capture_dump.py @@ -2,21 +2,19 @@ import argparse import contextlib +import logging import os import tempfile -import time import urllib.request from pathlib import Path -import cassandra.cluster -import cassandra.connection - - from utils.ccm import TestCluster from utils.jar_utils import ExporterJar -from utils.path_utils import existing_file_arg from utils.schema import CqlSchema +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + def cluster_directory(path): path = Path(path) @@ -43,8 +41,6 @@ def output_directory(path): return path - - if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION") @@ -75,37 +71,25 @@ def output_directory(path): os.makedirs(args.output_directory, exist_ok=True) with contextlib.ExitStack() as defer: + logger.info('Setting up Cassandra cluster.') ccm_cluster = defer.push(TestCluster( - args.cluster_directory, args.cassandra_version + cluster_directory=args.cluster_directory, + cassandra_version=args.cassandra_version, + exporter_jar=args.exporter_jar, + nodes=args.nodes, racks=args.racks, datacenters=args.datacenters, + delete_cluster_on_stop=not args.keep_cluster_directory, )) - print('Starting cluster...') + logger.info('Starting cluster.') ccm_cluster.start() - print('Applying schema...') - ccm_cluster.apply_schema() + logger.info('Applying CQL schema.') + ccm_cluster.apply_schema(args.schema) - print('Connecting to 
cluster...') - contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), ccm_cluster.nodelist()) - - cql_cluster = cassandra.cluster.Cluster(list(contact_points)) - with cql_cluster.connect() as cql_session: - print('Applying schema...') - for stmt in args.schema: - print('Executing "{}"...'.format(stmt.split('\n')[0])) - cql_session.execute(stmt) - - # the collector defers registrations by a second or two. - # See com.zegelin.cassandra.exporter.Harvester.defer() - print('Pausing to wait for deferred MBean registrations to complete...') - time.sleep(5) - - print('Capturing metrics dump...') + logger.info('Capturing metrics dump.') for node in ccm_cluster.nodelist(): url = f'http://{node.ip_addr}:{node.exporter_port}/metrics?x-accept=text/plain' destination = args.output_directory / f'{node.name}.txt' urllib.request.urlretrieve(url, destination) - print(f'Wrote {url} to {destination}') - - + logger.info(f'Wrote {url} to {destination}') diff --git a/test/debug_agent.py b/test/debug_agent.py index 38eac05..6e8fe3e 100644 --- a/test/debug_agent.py +++ b/test/debug_agent.py @@ -1,16 +1,37 @@ +# simple script to launch a single-node CCM cluster with the exporter agent installed, and the C* JVM +# configured to start the remote debugger agent + import argparse import os from pathlib import Path +from ccmlib.cluster import Cluster from ccmlib.cluster_factory import ClusterFactory -from utils.ccm import create_ccm_cluster from utils.jar_utils import ExporterJar +def create_ccm_cluster(cluster_directory: Path, cassandra_version: str, node_count: int): + if cluster_directory.exists(): + cluster_directory.rmdir() # CCM wants to create this + + print('Creating cluster...') + ccm_cluster = Cluster( + path=cluster_directory.parent, + name=cluster_directory.name, + version=cassandra_version, + create_directory=True # if this is false, various config files wont be created... + ) + + ccm_cluster.populate(nodes=node_count) + + return ccm_cluster + + def yesno_bool(b: bool): return ('n', 'y')[b] + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION") @@ -21,7 +42,8 @@ def yesno_bool(b: bool): parser.add_argument('--jvm-debug-address', type=str, help="address/port for JVM debug agent to listen on", default='5005') parser.add_argument('--exporter-args', type=str, help="exporter agent arguments", default='-l:9500') - parser.add_argument('-j', '--exporter-jar', type=ExporterJar.from_path, help="location of the cassandra-exporter jar, either agent or standalone (default: %(default)s)", default=str(ExporterJar.default_jar_path())) + + ExporterJar.add_jar_argument('--exporter-jar', parser) parser.set_defaults(jvm_debug_wait_attach=True) @@ -43,7 +65,8 @@ def yesno_bool(b: bool): node = ccm_cluster.nodelist()[0] print(f'Configuring node {node.name}') - node.set_environment_variable('JVM_OPTS', f'-javaagent:{args.exporter_jar.path}={args.exporter_args} -agentlib:jdwp=transport=dt_socket,server=y,suspend={yesno_bool(args.jvm_debug_wait_attach)},address={args.jvm_debug_address}') + node.set_environment_variable('JVM_OPTS', f'-javaagent:{args.exporter_jar.path}={args.exporter_args} ' + f'-agentlib:jdwp=transport=dt_socket,server=y,suspend={yesno_bool(args.jvm_debug_wait_attach)},address={args.jvm_debug_address}') print(f'JVM remote debugger listening on {args.jvm_debug_address}. 
JVM will suspend on start.') print('Starting single node cluster...') diff --git a/test/e2e_test.py b/test/e2e_test.py index cb5d259..dcb0a9e 100644 --- a/test/e2e_test.py +++ b/test/e2e_test.py @@ -8,38 +8,30 @@ import argparse import contextlib import http.server -import itertools -import json -import platform +import logging import random -import re import shutil -import socketserver -import subprocess -import tarfile +import sys import tempfile -import threading import time -import urllib.request -import urllib.error -from collections import namedtuple, defaultdict +from collections import defaultdict from pathlib import Path -from types import SimpleNamespace -import cassandra.connection from frozendict import frozendict -from utils.ccm import create_ccm_cluster, TestCluster +from utils.ccm import TestCluster from utils.jar_utils import ExporterJar from utils.path_utils import nonexistent_or_empty_directory_arg - -import yaml - from utils.prometheus import PrometheusInstance, PrometheusArchive from utils.schema import CqlSchema +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + + +class TestMetricsHTTPHandler(http.server.BaseHTTPRequestHandler): + """A test HTTP endpoint for Prometheus to scrape.""" -class DummyPrometheusHTTPHandler(http.server.BaseHTTPRequestHandler): def do_GET(self): if self.path != '/metrics': self.send_error(404) @@ -47,7 +39,7 @@ def do_GET(self): self.send_response(200) self.end_headers() - #if self.server.server_port == 9500: + # if self.server.server_port == 9500: if random.choice([True, False]): self.wfile.write(b'# TYPE test_family gauge\n' b'test_family 123\n') @@ -61,12 +53,17 @@ def do_GET(self): parser = argparse.ArgumentParser() parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION") - parser.add_argument('-C', '--working-directory', type=nonexistent_or_empty_directory_arg, help="location to install Cassandra and Prometheus. Must be empty or not exist. (default is a temporary directory)") - parser.add_argument('--keep-working-directory', help="don't delete the cluster directory on exit", action='store_true') + parser.add_argument('-C', '--working-directory', type=nonexistent_or_empty_directory_arg, + help="location to install Cassandra and Prometheus. Must be empty or not exist. 
(default is a temporary directory)") + parser.add_argument('--keep-working-directory', help="don't delete the cluster directory on exit", + action='store_true') - parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", default=2) - parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", default=3) - parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", default=3) + parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", + default=2) + parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", + default=3) + parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", + default=3) ExporterJar.add_jar_argument('--exporter-jar', parser) CqlSchema.add_schema_argument('--schema', parser) @@ -77,20 +74,22 @@ def do_GET(self): if args.working_directory is None: args.working_directory = Path(tempfile.mkdtemp()) + def delete_working_dir(): shutil.rmtree(args.working_directory) + with contextlib.ExitStack() as defer: if not args.keep_working_directory: defer.callback(delete_working_dir) # LIFO order -- this gets called last - print('Setting up Prometheus...') + logger.info('Setting up Prometheus.') prometheus = defer.push(PrometheusInstance( archive=args.prometheus_archive, working_directory=args.working_directory )) - print('Setting up Cassandra...') + logger.info('Setting up Cassandra cluster.') ccm_cluster = defer.push(TestCluster( cluster_directory=args.working_directory / 'test-cluster', cassandra_version=args.cassandra_version, @@ -99,27 +98,22 @@ def delete_working_dir(): delete_cluster_on_stop=not args.keep_working_directory, )) - # httpd = http.server.HTTPServer(("", 9500), DummyPrometheusHTTPHandler) # threading.Thread(target=httpd.serve_forever, daemon=True).start() # # httpd = http.server.HTTPServer(("", 9501), DummyPrometheusHTTPHandler) # threading.Thread(target=httpd.serve_forever, daemon=True).start() - prometheus.set_scrape_config('cassandra', list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) + prometheus.set_scrape_config('cassandra', + list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) # prometheus.set_scrape_config('cassandra', ['localhost:9500', 'localhost:9501']) prometheus.start() - - - - - print('Starting cluster...') + logger.info('Starting Cassandra cluster.') ccm_cluster.start() - print('Pausing to wait for deferred MBean registrations to complete...') - time.sleep(5) - + logger.info('Applying CQL schema.') + ccm_cluster.apply_schema(args.schema) target_histories = defaultdict(dict) @@ -144,15 +138,10 @@ def delete_working_dir(): time.sleep(1) - x = dict((target, history) for target, history in target_histories.items() - if any([health != 'up' for (health, error) in history.values()])) - - if len(x): - print(x) - - - - - - + unhealthy_targets = dict((target, history) for target, history in target_histories.items() + if any([health != 'up' for (health, error) in history.values()])) + if len(unhealthy_targets): + logger.error('One or more Prometheus scrape targets was unhealthy.') + logger.error(unhealthy_targets) + sys.exit(-1) diff --git a/test/utils/ccm.py b/test/utils/ccm.py index 4e8f0a7..c4990a5 100644 --- a/test/utils/ccm.py +++ b/test/utils/ccm.py @@ -1,6 +1,7 @@ import shutil import signal import 
subprocess +import time from pathlib import Path from typing import List @@ -9,8 +10,15 @@ from utils.jar_utils import ExporterJar from utils.schema import CqlSchema +import cassandra.cluster +import cassandra.connection + +import logging + class TestCluster(Cluster): + logger = logging.getLogger(f'{__name__}.{__qualname__}') + standalone_processes: List[subprocess.Popen] = [] def __init__(self, cluster_directory: Path, cassandra_version: str, @@ -97,12 +105,18 @@ def stop(self, wait=True, signal_event=signal.SIGTERM, **kwargs): return result def apply_schema(self, schema: CqlSchema): - cql_cluster = cassandra.cluster.Cluster(list(contact_points)) - with cql_cluster.connect() as cql_session: - print('Applying schema...') - for stmt in args.schema: - print('Executing "{}"...'.format(stmt.split('\n')[0])) - cql_session.execute(stmt) + contact_points = map(lambda n: cassandra.connection.DefaultEndPoint(*n.network_interfaces['binary']), self.nodelist()) + + with cassandra.cluster.Cluster(list(contact_points)) as cql_cluster: + with cql_cluster.connect() as cql_session: + for stmt in schema.statements: + self.logger.debug('Executing CQL statement "{}".'.format(stmt.split('\n')[0])) + cql_session.execute(stmt) + + # the collector defers registrations by a second or two. + # See com.zegelin.cassandra.exporter.Harvester.defer() + self.logger.info('Pausing to wait for deferred MBean registrations to complete.') + time.sleep(5) def __enter__(self): return self @@ -111,21 +125,3 @@ def __exit__(self, exc_type, exc_val, exc_tb): if self.stop_on_exit: self.stop() - - -def create_ccm_cluster(cluster_directory: Path, cassandra_version: str, node_count: int): - if cluster_directory.exists(): - cluster_directory.rmdir() # CCM wants to create this - - print('Creating cluster...') - ccm_cluster = Cluster( - path=cluster_directory.parent, - name=cluster_directory.name, - version=cassandra_version, - create_directory=True # if this is false, various config files wont be created... - ) - - ccm_cluster.populate(nodes=node_count) - - return ccm_cluster - diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py index d0cdebb..1d6fdff 100644 --- a/test/utils/prometheus.py +++ b/test/utils/prometheus.py @@ -143,7 +143,7 @@ def set_scrape_config(self, job_name: str, static_targets: List[str]): with self._modify_config() as config: config['scrape_configs'] = [{ 'job_name': job_name, - 'scrape_interval': '1s', + 'scrape_interval': '10s', 'static_configs': [{ 'targets': static_targets }] From d36cb3f201661f172437512dba174281ac56fa95 Mon Sep 17 00:00:00 2001 From: Adam Zegelin Date: Tue, 25 Feb 2020 13:04:26 -0800 Subject: [PATCH 06/19] Added script to create a demo CCM cluster. --- test/create_demo_cluster.py | 88 +++++++++++++++++++++++++++++++++++++ test/utils/prometheus.py | 3 -- 2 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 test/create_demo_cluster.py diff --git a/test/create_demo_cluster.py b/test/create_demo_cluster.py new file mode 100644 index 0000000..575300b --- /dev/null +++ b/test/create_demo_cluster.py @@ -0,0 +1,88 @@ +# spin up a CCM cluster of the specified C* version and Exporter build. +# Useful for testing and demos. 
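+#
+# example invocation (version and paths are hypothetical; see the argparse options below):
+#
+#   python create_demo_cluster.py 3.11.6 -d 1 -r 3 -n 3 -C /tmp/demo-cluster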
+ +import argparse +import contextlib +import http.server +import logging +import random +import shutil +import sys +import tempfile +import time +from collections import defaultdict +from pathlib import Path + +import yaml +from frozendict import frozendict + +from utils.ccm import TestCluster +from utils.jar_utils import ExporterJar +from utils.path_utils import nonexistent_or_empty_directory_arg +from utils.prometheus import PrometheusInstance, PrometheusArchive +from utils.schema import CqlSchema + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('cassandra_version', type=str, help="version of Cassandra to run", metavar="CASSANDRA_VERSION") + + parser.add_argument('-C', '--working-directory', type=nonexistent_or_empty_directory_arg, + help="location to install Cassandra and Prometheus. Must be empty or not exist. (default is a temporary directory)") + parser.add_argument('--keep-working-directory', help="don't delete the cluster directory on exit", + action='store_true') + + parser.add_argument('-d', '--datacenters', type=int, help="number of data centers (default: %(default)s)", + default=1) + parser.add_argument('-r', '--racks', type=int, help="number of racks per data center (default: %(default)s)", + default=3) + parser.add_argument('-n', '--nodes', type=int, help="number of nodes per data center rack (default: %(default)s)", + default=3) + + ExporterJar.add_jar_argument('--exporter-jar', parser) + CqlSchema.add_schema_argument('--schema', parser) + PrometheusArchive.add_archive_argument('--prometheus-archive', parser) + + args = parser.parse_args() + + if args.working_directory is None: + args.working_directory = Path(tempfile.mkdtemp()) + + + def delete_working_dir(): + shutil.rmtree(args.working_directory) + + + with contextlib.ExitStack() as defer: + if not args.keep_working_directory: + defer.callback(delete_working_dir) # LIFO order -- this gets called last + + logger.info('Setting up Cassandra cluster.') + ccm_cluster = defer.push(TestCluster( + cluster_directory=args.working_directory / 'test-cluster', + cassandra_version=args.cassandra_version, + exporter_jar=args.exporter_jar, + nodes=args.nodes, racks=args.racks, datacenters=args.datacenters, + delete_cluster_on_stop=not args.keep_working_directory, + )) + + + + print('Prometheus scrape config:') + config = {'scrape_configs': [{ + 'job_name': 'cassandra', + 'scrape_interval': '10s', + 'static_configs': [{ + 'targets': [f'localhost:{node.exporter_port}' for node in ccm_cluster.nodelist()] + }] + }]} + + yaml.safe_dump(config, sys.stdout) + + ccm_cluster.start() + logger.info("Cluster is now running.") + + input("Press any key to stop cluster...")
\ No newline at end of file
diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py
index 1d6fdff..0834663 100644
--- a/test/utils/prometheus.py
+++ b/test/utils/prometheus.py
@@ -17,9 +17,6 @@ import logging -from utils.log_mixin import LogMixin - - class _TqdmIOStream(object): def __init__(self, stream, t): self._stream = stream
From e6da3f19107e74773fc248e2dca145faaf26ca59 Mon Sep 17 00:00:00 2001
From: Jackson Fleming <74215062+jfleming-ic@users.noreply.github.com>
Date: Fri, 22 Jul 2022 11:10:42 +1000
Subject: [PATCH 07/19] Update README.md
Update README to include a note that Cassandra 4.0 is unsupported.
--- README.md | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/README.md b/README.md
index c14e86f..e494b2e 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@ *Project Status: beta* +# Note - The Cassandra-Exporter tool does not support Cassandra 4.0 or newer; see the compatibility section for more details on supported versions. + ## Introduction
From b0883afff14a5e96bd978ea7e707f2482676358b Mon Sep 17 00:00:00 2001
From: Adam Zegelin
Date: Wed, 9 Nov 2022 19:43:21 +1100
Subject: [PATCH 08/19] Porting e2e tests to click and Python 3.
--- test/test_tool.py | 327 +++++++++++++++++++++++++++++++++ test/utils/ccm.py | 55 ++++--- test/utils/jar_utils.py | 68 ++++---- test/utils/path_utils.py | 11 +- test/utils/schema.py | 21 +-- 5 files changed, 409 insertions(+), 73 deletions(-)
create mode 100644 test/test_tool.py
diff --git a/test/test_tool.py b/test/test_tool.py
new file mode 100644
index 0000000..6af52a9
--- /dev/null
+++ b/test/test_tool.py
@@ -0,0 +1,327 @@ +import argparse +import inspect +import logging +import os +import sys +import typing as t +from contextlib import contextmanager +import shutil +import tempfile +from functools import wraps, update_wrapper, WRAPPER_UPDATES +from itertools import chain +from pathlib import Path +import pkg_resources + +import click + +from utils.ccm import TestCluster +from utils.jar_utils import ExporterJar +from utils.prometheus import PrometheusInstance +from utils.schema import CqlSchema + +logger = logging.getLogger('test-tool') + + +def ppstrlist(sl: t.List[t.Any], conj: str = 'or', quote: bool = False): + joins = [', '] * len(sl) + joins += [f' {conj} ', ''] + + joins = joins[-len(sl):] + + if quote: + sl = [f'"{s}"' for s in sl] + + return ''.join(chain.from_iterable(zip(sl, joins))) + + + +def fixup_kwargs(*skip: str): + """ + inspect the caller's frame, grab any arguments and shove them back into kwargs + + this is useful when the caller is a wrapper and wants to pass on the majority of its arguments to the wrapped function + """ + + caller_frame = inspect.stack()[1].frame + args, _, kwvar, values = inspect.getargvalues(caller_frame) + + args: t.List[str] = [a for a in args if a not in skip] + + kwargs: t.Dict[str, t.Any] = values[kwvar] + + for a in args: + v = values[a] + if isinstance(v, click.Context): + continue + + kwargs[a] = v + + #kwargs.update(overrides) + + pass + + +def with_working_directory(): + def decorator(func: t.Callable) -> t.Callable: + @click.option('-C', '--working-directory', type=click.Path(path_type=Path), + help="location to install Cassandra and/or Prometheus. Must be empty or not exist.
Defaults to a temporary directory.") + @click.option('--keep-working-directory', is_flag=True, + help="don't delete the working directory on exit.") + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, working_directory: Path, keep_working_directory: bool, **kwargs): + @contextmanager + def working_dir_ctx(): + nonlocal working_directory, keep_working_directory + + if working_directory is None: + working_directory = Path(tempfile.mkdtemp()) + + logger.info('Working directory is: %s', working_directory) + + try: + yield working_directory + finally: + if not keep_working_directory: + logger.debug('Deleting working directory') + shutil.rmtree(working_directory) + + working_directory = ctx.with_resource(working_dir_ctx()) + + fixup_kwargs() + + func(**kwargs) + + return wrapper + + return decorator + + +class ExporterJarParamType(click.ParamType): + name = "path" + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> ExporterJar: + if isinstance(value, ExporterJar): + return value + + try: + if isinstance(value, str): + for t in ExporterJar.ExporterType: + if t.name.lower() == value.lower(): + return ExporterJar.from_path(ExporterJar.default_jar_path(t)) + + + return ExporterJar.from_path(value) + + except Exception as e: + self.fail(str(e), param, ctx) + + +class CqlSchemaParamType(click.ParamType): + name = "path" + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> CqlSchema: + if isinstance(value, CqlSchema): + return value + + try: + return CqlSchema.from_path(value) + + except Exception as e: + self.fail(str(e), param, ctx) + + +def with_ccm_cluster(): + def decorator(func: t.Callable) -> t.Callable: + + jar_default_path = None + + # noinspection PyBroadException + try: + jar_default_path = ExporterJar.default_jar_path() + + except: + logger.warning('Failed to determine default cassandra-exporter jar path', exc_info=True) + + jar_types = [type.name.lower() for type in ExporterJar.ExporterType] + + @click.argument('cassandra_version') + @click.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True) + @click.option('--topology', 'cassandra_topology', + type=(int, int, int), default=(2, 3, 1), show_default=True, + metavar='DCS RACKS NODES', help="number of data centers, racks per data center, and nodes per rack.") + @click.option('-j', '--exporter-jar', required=True, default=jar_default_path, show_default=True, type=ExporterJarParamType(), + help=f"path of the cassandra-exporter jar, either {ppstrlist(jar_types)} builds, or one of {ppstrlist(jar_types, quote=True)} for the default jar of that type.") + @click.option('-s', '--schema', 'cql_schema', default=CqlSchema.default_schema_path(), show_default=True, type=CqlSchemaParamType(), + help='path of the CQL schema YAML file to apply on cluster start. 
The YAML file must contain a list of CQL statement strings.') + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, + cassandra_version: str, cassandra_cluster_name: str, cassandra_topology: t.Tuple[int, int, int], + exporter_jar: ExporterJar, + cql_schema: t.Optional[CqlSchema], + working_directory: Path, **kwargs): + + datacenters, racks, nodes, = cassandra_topology + + logger.info('Creating Cassandra %s cluster, with:', cassandra_version) + logger.info(' Topology: %s data center(s), %s rack(s) per DC, %s node(s) per rack (%s node(s) total)', datacenters, racks, nodes, (nodes * racks * datacenters)) + logger.info(' cassandra-exporter: %s', exporter_jar) + + ccm_cluster = ctx.with_resource(TestCluster( + cluster_directory=(working_directory / cassandra_cluster_name), + cassandra_version=cassandra_version, + nodes=nodes*racks*datacenters, racks=racks, datacenters=datacenters, + exporter_jar=exporter_jar, + initial_schema=cql_schema + )) + + fixup_kwargs() + + func(ccm_cluster=ccm_cluster, **kwargs) + + + return wrapper + + return decorator + + +def with_prometheus(): + def decorator(func: t.Callable) -> t.Callable: + @click.option('--prometheus-version', 'prometheus_version', default='test-cluster', show_default=True) + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, + working_directory: Path, + ccm_cluster: t.Optional[TestCluster] = None, + **kwargs): + + + # prometheus = ctx.with_resource(PrometheusInstance( + # archive=args.prometheus_archive, + # working_directory=working_directory + # )) + + if ccm_cluster: + pass + # prometheus.set_scrape_config('cassandra', + # [f'localhost:{n.exporter_port}' for n in ccm_cluster.nodelist()] + # ) + + fixup_kwargs() + + func(prometheus=None, **kwargs) + + return wrapper + + + return decorator + + +@click.group() +def cli(): + pass + + + +@cli.command('demo') +@with_working_directory() +@with_ccm_cluster() +# @with_prometheus() +# @click.option('--hello') +def run_demo_cluster(ccm_cluster: TestCluster, **kwargs): + """ + Start C* with the exporter jar (agent or standalone). + Optionally setup a schema. + Wait for ctrl-c to shut everything down. + """ + ccm_cluster.start() + + config = {'scrape_configs': [{ + 'job_name': 'cassandra', + 'scrape_interval': '10s', + 'static_configs': [{ + 'targets': [f'localhost:{node.exporter_port}' for node in ccm_cluster.nodelist()] + }] + }]} + + sys.stderr.flush() + sys.stdout.flush() + + input("Press any key to stop cluster...") + + +@cli.command() +def dump(): + pass + +# capture dump (start C* with exporter, fetch and write metrics to file) + # this is very similar to the demo cmd +# validate dump (check for syntax errors, etc) +# compare/diff dump (list metrics added & removed) + + +@cli.command() +@with_working_directory() +@with_ccm_cluster() +@with_prometheus() +def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance): + """ + Run end-to-end tests. + + - Start C* with the exporter JAR (agent or standalone). + - Setup a schema. + - Configure and start prometheus. + - Wait for all scrape targets to get healthy. + - Run some tests.
+ """ + + logger.info('Starting Prometheus.') + prometheus.start() + + logger.info('Starting Cassandra cluster.') + ccm_cluster.start() + + while True: + targets = prometheus.get_targets() + + if len(targets['activeTargets']) > 0: + for target in targets['activeTargets']: + labels = frozendict(target['labels']) + + # even if the target health is unknown, ensure the key exists so the length check below + # is aware of the target + history = target_histories[labels] + + if target['health'] == 'unknown': + continue + + history[target['lastScrape']] = (target['health'], target['lastError']) + + if all([len(v) >= 5 for v in target_histories.values()]): + break + + time.sleep(1) + + unhealthy_targets = dict((target, history) for target, history in target_histories.items() + if any([health != 'up' for (health, error) in history.values()])) + + if len(unhealthy_targets): + logger.error('One or more Prometheus scrape targets was unhealthy.') + logger.error(unhealthy_targets) + sys.exit(-1) + + +def main(): + # load ccm extensions (useful for ccm-java8, for example) + for entry_point in pkg_resources.iter_entry_points(group='ccm_extension'): + entry_point.load()() + + cli() + + +if __name__ == '__main__': + os.environ['CCM_JAVA8_DEBUG'] = 'please' + logging.basicConfig(level=logging.DEBUG) + #logger.info("Hello!") + main() \ No newline at end of file diff --git a/test/utils/ccm.py b/test/utils/ccm.py index c4990a5..da95b7f 100644 --- a/test/utils/ccm.py +++ b/test/utils/ccm.py @@ -3,7 +3,7 @@ import subprocess import time from pathlib import Path -from typing import List +from typing import List, Optional from ccmlib.cluster import Cluster @@ -24,10 +24,10 @@ class TestCluster(Cluster): def __init__(self, cluster_directory: Path, cassandra_version: str, nodes: int, racks: int, datacenters: int, exporter_jar: ExporterJar, - stop_on_exit: bool = True, delete_cluster_on_stop: bool = True): + initial_schema: Optional[CqlSchema]): if cluster_directory.exists(): - cluster_directory.rmdir() # CCM wants to create this + raise RuntimeError(f'Cluster directory {cluster_directory} must not exist.') # CCM wants to create this super().__init__( path=cluster_directory.parent, @@ -36,10 +36,8 @@ def __init__(self, cluster_directory: Path, cassandra_version: str, create_directory=True # if this is false, various config files wont be created... 
) - self.stop_on_exit = stop_on_exit - self.delete_cluster_on_stop = delete_cluster_on_stop - self.exporter_jar = exporter_jar + self.initial_schema = initial_schema self.populate(nodes, racks, datacenters) @@ -69,15 +67,23 @@ def populate(self, nodes: int, racks: int = 1, datacenters: int = 1, return result - def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, wait_other_notice=True, jvm_args=None, + def start(self, verbose=False, wait_for_binary_proto=True, wait_other_notice=True, jvm_args=None, profile_options=None, quiet_start=False, allow_root=False, **kwargs): - result = super().start(no_wait, verbose, wait_for_binary_proto, wait_other_notice, jvm_args, profile_options, + self.logger.info('Starting Cassandra cluster...') + result = super().start(False, verbose, wait_for_binary_proto, wait_other_notice, jvm_args, profile_options, quiet_start, allow_root, **kwargs) + self.logger.info('Cassandra cluster started successfully') + + if self.initial_schema: + self.logger.info('Applying initial CQL schema...') + self.apply_schema(self.initial_schema) # start the standalone exporters, if requested if self.exporter_jar.type == ExporterJar.ExporterType.STANDALONE: for node in self.nodelist(): + self.logger.info('Starting standalone cassandra-exporter for node %s...', node.ip_addr) + process = self.exporter_jar.start_standalone( logfile_path=Path(node.get_path()) / 'logs' / 'cassandra-exporter.log', listen_address=('localhost', node.exporter_port), @@ -87,20 +93,24 @@ def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, wait_o self.standalone_processes.append(process) + self.logger.info('Standalone cassandra-exporters started successfully') + return result def stop(self, wait=True, signal_event=signal.SIGTERM, **kwargs): - result = super().stop(wait, signal_event, **kwargs) - - # shutdown standalone exporters, if they're still running - for p in self.standalone_processes: - p.terminate() + if len(self.standalone_processes): + # shutdown standalone exporters, if they're still running + self.logger.info('Stopping standalone cassandra-exporters...') + for p in self.standalone_processes: + p.terminate() - if wait: - p.wait() + if wait: + p.wait() + self.logger.info('Standalone cassandra-exporters stopped') - if self.delete_cluster_on_stop: - shutil.rmtree(self.get_path()) + self.logger.info('Stopping Cassandra cluster...') + result = super().stop(wait, signal_event, **kwargs) + self.logger.info('Cassandra cluster stopped') return result @@ -113,15 +123,14 @@ def apply_schema(self, schema: CqlSchema): self.logger.debug('Executing CQL statement "{}".'.format(stmt.split('\n')[0])) cql_session.execute(stmt) - # the collector defers registrations by a second or two. - # See com.zegelin.cassandra.exporter.Harvester.defer() - self.logger.info('Pausing to wait for deferred MBean registrations to complete.') - time.sleep(5) + # # the collector defers registrations by a second or two. 
+ # # See com.zegelin.cassandra.exporter.Harvester.defer() + # self.logger.info('Pausing to wait for deferred MBean registrations to complete.') + # time.sleep(5) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if self.stop_on_exit: - self.stop() + self.stop() diff --git a/test/utils/jar_utils.py b/test/utils/jar_utils.py index 767ca5e..b3f7069 100644 --- a/test/utils/jar_utils.py +++ b/test/utils/jar_utils.py @@ -1,28 +1,38 @@ import argparse +from dataclasses import dataclass import logging import re import subprocess +import typing as t import zipfile -from collections import namedtuple from enum import Enum +from os import PathLike from pathlib import Path from xml.etree import ElementTree from utils.path_utils import existing_file_arg +logger = logging.getLogger(__name__) -class ExporterJar(namedtuple('ExporterJar', ['path', 'type'])): - logger = logging.getLogger(f'{__name__}.{__qualname__}') +@dataclass +class ExporterJar: class ExporterType(Enum): AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent') STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application') + def path(self, version: str): + lname = self.name.lower() + return f'{lname}/target/cassandra-exporter-{lname}-{version}.jar' + + path: Path + type: ExporterType + @classmethod - def from_path(cls, path): + def from_path(cls, path: PathLike) -> 'ExporterJar': path = existing_file_arg(path) - # determine the JAR type (agent or standalone) via the Main/Premain class + # determine the JAR type (agent or standalone) via the Main/Premain class name listed in the manifest try: with zipfile.ZipFile(path) as zf: manifest = zf.open('META-INF/MANIFEST.MF').readlines() @@ -35,12 +45,24 @@ def parse_line(line): type = next(iter([t for t in ExporterJar.ExporterType if t.value in manifest.items()]), None) if type is None: - raise argparse.ArgumentTypeError(f'"{path}" is not a cassandra-exporter jar.') + raise ValueError(f'no manifest attribute found that matches known values') return cls(path, type) - except (zipfile.BadZipFile, KeyError): - raise argparse.ArgumentTypeError(f'"{path}" is not a jar.') + except Exception as e: + raise ValueError(f'{path} is not a valid cassandra-exporter jar: {e}') + + @staticmethod + def default_jar_path(type: ExporterType = ExporterType.AGENT) -> Path: + project_dir = Path(__file__).parents[2] + + root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot() + project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text + + return project_dir / type.path(project_version) + + def __str__(self) -> str: + return f'{self.path} ({self.type.name})' def start_standalone(self, listen_address: (str, int), jmx_address: (str, int), @@ -50,7 +72,7 @@ def start_standalone(self, listen_address: (str, int), logfile = logfile_path.open('w') def addr_str(address: (str, int)): - return ':'.join(address) + return ':'.join(map(str, address)) command = ['java', '-jar', self.path, @@ -58,31 +80,7 @@ def addr_str(address: (str, int)): '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://{addr_str(jmx_address)}/jmxrmi', '--cql-address', addr_str(cql_address) ] - print(' '.join(map(str, command))) - return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT) - - @staticmethod - def default_jar_path(): - project_dir = Path(__file__).parents[2] - - root_pom = ElementTree.parse(project_dir / 'pom.xml').getroot() - project_version = root_pom.find('{http://maven.apache.org/POM/4.0.0}version').text - - return project_dir / 
f'agent/target/cassandra-exporter-agent-{project_version}.jar' - - @classmethod - def add_jar_argument(cls, name, parser): - try: - default_path = ExporterJar.default_jar_path() - default_help = '(default: %(default)s)' - - except Exception as e: - cls.logger.warning('failed to locate default exporter Jar', exc_info=True) - default_path = None - default_help = f'(default: failed to locate default exporter Jar: {e})' + logger.debug('Standalone exec(%s)', command) - parser.add_argument(name, type=ExporterJar.from_path, - help="location of the cassandra-exporter Jar, either agent or standalone " + default_help, - required=default_path is None, - default=str(default_path)) + return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT) \ No newline at end of file diff --git a/test/utils/path_utils.py b/test/utils/path_utils.py index 5bc571a..32ba789 100644 --- a/test/utils/path_utils.py +++ b/test/utils/path_utils.py @@ -1,14 +1,15 @@ import argparse +from os import PathLike from pathlib import Path -def existing_file_arg(path): +def existing_file_arg(path: PathLike): path = Path(path) if not path.exists(): - raise argparse.ArgumentTypeError(f'file "{path}" does not exist.') + raise ValueError(f'file "{path}" does not exist.') if not path.is_file(): - raise argparse.ArgumentTypeError(f'"{path}" is not a regular file.') + raise ValueError(f'"{path}" is not a regular file.') return path @@ -18,9 +19,9 @@ def nonexistent_or_empty_directory_arg(path): if path.exists(): if not path.is_dir(): - raise argparse.ArgumentTypeError(f'"{path}" must be a directory.') + raise ValueError(f'"{path}" must be a directory.') if next(path.iterdir(), None) is not None: - raise argparse.ArgumentTypeError(f'"{path}" must be an empty directory.') + raise ValueError(f'"{path}" must be an empty directory.') return path \ No newline at end of file diff --git a/test/utils/schema.py b/test/utils/schema.py index 4bf8e07..af56689 100644 --- a/test/utils/schema.py +++ b/test/utils/schema.py @@ -1,35 +1,36 @@ import argparse +from dataclasses import dataclass +from os import PathLike +import typing as t import yaml from pathlib import Path from collections import namedtuple from utils.path_utils import existing_file_arg +@dataclass +class CqlSchema: + path: Path + statements: t.List[str] -class CqlSchema(namedtuple('CqlSchema', ['path', 'statements'])): @classmethod - def from_path(cls, path): + def from_path(cls, path: PathLike) -> 'CqlSchema': path = existing_file_arg(path) with open(path, 'r') as f: schema = yaml.load(f, Loader=yaml.SafeLoader) if not isinstance(schema, list): - raise argparse.ArgumentTypeError(f'root of the schema YAML must be a list. Got a {type(schema).__name__}.') + raise ValueError(f'root of the schema YAML must be a list. Got a {type(schema).__name__}.') for i, o in enumerate(schema): if not isinstance(o, str): - raise argparse.ArgumentTypeError(f'schema YAML must be a list of statement strings. Item {i} is a {type(o).__name__}.') + raise ValueError(f'schema YAML must be a list of statement strings. 
Item {i} is a {type(o).__name__}.') return cls(path, schema) @staticmethod - def default_schema_path(): + def default_schema_path() -> Path: test_dir = Path(__file__).parents[1] return test_dir / "schema.yaml" - @staticmethod - def add_schema_argument(name, parser): - parser.add_argument(name, type=CqlSchema.from_path, - help="CQL schema to apply (default: %(default)s)", - default=str(CqlSchema.default_schema_path())) From 0e9aab26d3396661043d3ed752ff8017027f1a94 Mon Sep 17 00:00:00 2001 From: Adam Zegelin Date: Mon, 28 Nov 2022 20:04:26 +1100 Subject: [PATCH 09/19] More test rework (fixed Prometheus server starting, etc) --- test/test_tool.py | 200 +++++++++++++++++++++++++++------------ test/utils/ccm.py | 14 +-- test/utils/jar_utils.py | 26 ++--- test/utils/net.py | 13 +++ test/utils/prometheus.py | 139 ++++++++++++++++++++------- 5 files changed, 275 insertions(+), 117 deletions(-) create mode 100644 test/utils/net.py diff --git a/test/test_tool.py b/test/test_tool.py index 6af52a9..1580c11 100644 --- a/test/test_tool.py +++ b/test/test_tool.py @@ -3,6 +3,7 @@ import logging import os import sys +import tarfile import typing as t from contextlib import contextmanager import shutil @@ -10,13 +11,16 @@ from functools import wraps, update_wrapper, WRAPPER_UPDATES from itertools import chain from pathlib import Path +from tarfile import TarFile + import pkg_resources import click +import cloup from utils.ccm import TestCluster from utils.jar_utils import ExporterJar -from utils.prometheus import PrometheusInstance +from utils.prometheus import PrometheusInstance, RemotePrometheusArchive, archive_from_path_or_url from utils.schema import CqlSchema logger = logging.getLogger('test-tool') @@ -146,14 +150,17 @@ def decorator(func: t.Callable) -> t.Callable: jar_types = [type.name.lower() for type in ExporterJar.ExporterType] @click.argument('cassandra_version') - @click.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True) - @click.option('--topology', 'cassandra_topology', - type=(int, int, int), default=(2, 3, 1), show_default=True, - metavar='DCS RACKS NODES', help="number of data centers, racks per data center, and nodes per rack.") - @click.option('-j', '--exporter-jar', required=True, default=jar_default_path, show_default=True, type=ExporterJarParamType(), - help=f"path of the cassandra-exporter jar, either {ppstrlist(jar_types)} builds, or one of {ppstrlist(jar_types, quote=True)} for the default jar of that type.") - @click.option('-s', '--schema', 'cql_schema', default=CqlSchema.default_schema_path(), show_default=True, type=CqlSchemaParamType(), - help='path of the CQL schema YAML file to apply on cluster start. 
The YAML file must contain a list of CQL statement strings.') + @cloup.option_group( + "Cassandra", + cloup.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True), + cloup.option('--topology', 'cassandra_topology', + type=(int, int, int), default=(2, 3, 1), show_default=True, + metavar='DCS RACKS NODES', help="number of data centers, racks per data center, and nodes per rack."), + cloup.option('-j', '--exporter-jar', required=True, default=jar_default_path, show_default=True, type=ExporterJarParamType(), + help=f"path of the cassandra-exporter jar, either {ppstrlist(jar_types)} builds, or one of {ppstrlist(jar_types, quote=True)} for the default jar of that type."), + cloup.option('-s', '--schema', 'cql_schema', default=CqlSchema.default_schema_path(), show_default=True, type=CqlSchemaParamType(), + help='path of the CQL schema YAML file to apply on cluster start. The YAML file must contain a list of CQL statement strings.') + ) @click.pass_context @wraps(func) def wrapper(ctx: click.Context, @@ -186,31 +193,69 @@ def wrapper(ctx: click.Context, return decorator +# class PrometheusArchiveParamType(click.ParamType): +# name = "tag/path/URL" +# +# def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> PrometheusArchive: +# if isinstance(value, PrometheusArchive): +# return value +# +# try: +# if isinstance(value, str): +# for t in ExporterJar.ExporterType: +# if t.name.lower() == value.lower(): +# return ExporterJar.from_path(ExporterJar.default_jar_path(t)) +# +# +# return ExporterJar.from_path(value) +# +# except Exception as e: +# self.fail(str(e), param, ctx) + + def with_prometheus(): def decorator(func: t.Callable) -> t.Callable: - @click.option('--prometheus-version', 'prometheus_version', default='test-cluster', show_default=True) + @cloup.option_group( + "Prometheus Archive", + cloup.option('--prometheus-version', metavar='TAG'), + cloup.option('--prometheus-archive', metavar='PATH/URL'), + constraint=cloup.constraints.mutually_exclusive + ) + #@click.option('--prometheus-version', 'prometheus_version', default='test-cluster', show_default=True) @click.pass_context @wraps(func) def wrapper(ctx: click.Context, + prometheus_version: str, + prometheus_archive: str, working_directory: Path, ccm_cluster: t.Optional[TestCluster] = None, **kwargs): + if prometheus_version is None and prometheus_archive is None: + prometheus_version = 'latest' + + if prometheus_version is not None: + archive = RemotePrometheusArchive.for_tag(prometheus_version) + + else: + archive = archive_from_path_or_url(prometheus_archive) - # prometheus = ctx.with_resource(PrometheusInstance( - # archive=args.prometheus_archive, - # working_directory=working_directory - # )) + if isinstance(archive, RemotePrometheusArchive): + archive = archive.download() + + prometheus = ctx.with_resource(PrometheusInstance( + archive=archive, + working_directory=working_directory + )) if ccm_cluster: - pass - # prometheus.set_scrape_config('cassandra', - # [f'localhost:{n.exporter_port}' for n in ccm_cluster.nodelist()] - # ) + prometheus.set_static_scrape_config('cassandra', + [str(n.exporter_address) for n in ccm_cluster.nodelist()] + ) fixup_kwargs() - func(prometheus=None, **kwargs) + func(prometheus=prometheus, **kwargs) return wrapper @@ -218,7 +263,7 @@ def wrapper(ctx: click.Context, return decorator -@click.group() +@cloup.group() def cli(): pass @@ -227,8 +272,6 @@ def cli(): @cli.command('demo') @with_working_directory() 
 @with_ccm_cluster()
-# @with_prometheus()
-# @click.option('--hello')
 def run_demo_cluster(ccm_cluster: TestCluster, **kwargs):
     """
     Start C* with the exporter jar (agent or standalone).
@@ -237,13 +280,8 @@ def run_demo_cluster(ccm_cluster: TestCluster, **kwargs):
     """
     ccm_cluster.start()
 
-    config = {'scrape_configs': [{
-        'job_name': 'cassandra',
-        'scrape_interval': '10s',
-        'static_configs': [{
-            'targets': [f'http://localhost:{node.exporter_port}' for node in ccm_cluster.nodelist()]
-        }]
-    }]}
+    for node in ccm_cluster.nodelist():
+        logger.info('Node %s cassandra-exporter running on http://%s', node.name, node.network_interfaces['exporter'])
 
     sys.stderr.flush()
     sys.stdout.flush()
@@ -251,10 +289,43 @@ def run_demo_cluster(ccm_cluster: TestCluster, **kwargs):
     input("Press any key to stop cluster...")
 
 
-@cli.command()
+@cli.group('dump')
 def dump():
     pass
 
+
+@dump.command('capture')
+@with_working_directory()
+@with_ccm_cluster()
+@click.argument('filename')
+def dump_capture(ccm_cluster: TestCluster, filename: str, **kwargs):
+    """Capture metrics from cassandra-exporter and save them to disk."""
+
+    logger.info('Capturing metrics dump.')
+
+    # with tarfile.open(filename, 'w') as tf:
+    #     tf.
+
+    # for node in ccm_cluster.nodelist():
+    #     url = f'http://{node.network_interfaces["exporter"]}/metrics?x-accept=text/plain'
+    #     destination = args.output_directory / f'{node.name}.txt'
+    #     urllib.request.urlretrieve(url, destination)
+    #
+    #     logger.info(f'Wrote {url} to {destination}')
+
+
+@dump.command('validate')
+def dump_validate():
+    pass
+
+
+def dump_compare():
+    pass
+
+
 # capture dump (start C* with exporter, fetch and write metrics to file)
 # this is very similar to the demo cmd
 # validate dump (check for syntax errors, etc)
@@ -265,9 +336,9 @@ def dump():
 @with_working_directory()
 @with_ccm_cluster()
 @with_prometheus()
-def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance):
+def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance, **kwargs):
     """
-    Run end-to-end tests.
+    Run cassandra-exporter end-to-end tests.
 
     - Start C* with the exporter JAR (agent or standalone).
     - Set up a schema.
@@ -276,41 +347,44 @@ def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance):
     - Run some tests.
""" - logger.info('Starting Prometheus.') - prometheus.start() - - logger.info('Starting Cassandra cluster.') ccm_cluster.start() - while True: - targets = prometheus.get_targets() - - if len(targets['activeTargets']) > 0: - for target in targets['activeTargets']: - labels = frozendict(target['labels']) - - # even if the target health is unknown, ensure the key exists so the length check below - # is aware of the target - history = target_histories[labels] - - if target['health'] == 'unknown': - continue - - history[target['lastScrape']] = (target['health'], target['lastError']) - - if all([len(v) >= 5 for v in target_histories.values()]): - break - - time.sleep(1) - - unhealthy_targets = dict((target, history) for target, history in target_histories.items() - if any([health != 'up' for (health, error) in history.values()])) + prometheus.start() - if len(unhealthy_targets): - logger.error('One or more Prometheus scrape targets was unhealthy.') - logger.error(unhealthy_targets) - sys.exit(-1) + input("Press any key to stop cluster...") + # while True: + # targets = prometheus.get_targets() + # + # if len(targets['activeTargets']) > 0: + # for target in targets['activeTargets']: + # labels = frozendict(target['labels']) + # + # # even if the target health is unknown, ensure the key exists so the length check below + # # is aware of the target + # history = target_histories[labels] + # + # if target['health'] == 'unknown': + # continue + # + # history[target['lastScrape']] = (target['health'], target['lastError']) + # + # if all([len(v) >= 5 for v in target_histories.values()]): + # break + # + # time.sleep(1) + # + # unhealthy_targets = dict((target, history) for target, history in target_histories.items() + # if any([health != 'up' for (health, error) in history.values()])) + # + # if len(unhealthy_targets): + # logger.error('One or more Prometheus scrape targets was unhealthy.') + # logger.error(unhealthy_targets) + # sys.exit(-1) + + +# def timing(): +# def main(): # load ccm extensions (useful for ccm-java8, for example) diff --git a/test/utils/ccm.py b/test/utils/ccm.py index da95b7f..bdb1caa 100644 --- a/test/utils/ccm.py +++ b/test/utils/ccm.py @@ -8,6 +8,7 @@ from ccmlib.cluster import Cluster from utils.jar_utils import ExporterJar +from utils.net import SocketAddress from utils.schema import CqlSchema import cassandra.cluster @@ -47,10 +48,10 @@ def populate(self, nodes: int, racks: int = 1, datacenters: int = 1, result = super().populate(nodes, debug, tokens, use_vnodes, ipprefix, ipformat, install_byteman) for i, node in enumerate(self.nodelist()): - node.exporter_port = 9500 + i + node.exporter_address = SocketAddress(node.ip_addr, 9500 + i) if self.exporter_jar.type == ExporterJar.ExporterType.AGENT: - node.set_environment_variable('JVM_OPTS', f'-javaagent:{self.exporter_jar.path}=-l:{node.exporter_port}') + node.set_environment_variable('JVM_OPTS', f'-javaagent:{self.exporter_jar.path}=-l{node.exporter_address}') # set dc/rack manually, since CCM doesn't support custom racks node.set_configuration_options({ @@ -63,7 +64,8 @@ def populate(self, nodes: int, racks: int = 1, datacenters: int = 1, node.dc_idx = (int(i / nodes * racks)) + 1 with open(rackdc_path, 'w') as f: - f.write(f'dc=dc{node.dc_idx}\nrack=rack{node.rack_idx}\n') + print(f'dc=dc{node.dc_idx}', file=f) + print(f'rack=rack{node.rack_idx}', file=f) return result @@ -86,9 +88,9 @@ def start(self, verbose=False, wait_for_binary_proto=True, wait_other_notice=Tru process = self.exporter_jar.start_standalone( 
logfile_path=Path(node.get_path()) / 'logs' / 'cassandra-exporter.log', - listen_address=('localhost', node.exporter_port), - jmx_address=('localhost', node.jmx_port), - cql_address=node.network_interfaces["binary"] + listen_address=node.exporter_address, + jmx_address=SocketAddress('localhost', node.jmx_port), + cql_address=SocketAddress(*node.network_interfaces["binary"]) ) self.standalone_processes.append(process) diff --git a/test/utils/jar_utils.py b/test/utils/jar_utils.py index b3f7069..d3a02f9 100644 --- a/test/utils/jar_utils.py +++ b/test/utils/jar_utils.py @@ -10,13 +10,13 @@ from pathlib import Path from xml.etree import ElementTree +from utils.net import SocketAddress from utils.path_utils import existing_file_arg -logger = logging.getLogger(__name__) - - @dataclass class ExporterJar: + logger = logging.getLogger(f'{__name__}.{__qualname__}') + class ExporterType(Enum): AGENT = ('Premain-Class', 'com.zegelin.cassandra.exporter.Agent') STANDALONE = ('Main-Class', 'com.zegelin.cassandra.exporter.Application') @@ -64,23 +64,23 @@ def default_jar_path(type: ExporterType = ExporterType.AGENT) -> Path: def __str__(self) -> str: return f'{self.path} ({self.type.name})' - def start_standalone(self, listen_address: (str, int), - jmx_address: (str, int), - cql_address: (str, int), + def start_standalone(self, listen_address: SocketAddress, + jmx_address: SocketAddress, + cql_address: SocketAddress, logfile_path: Path): - logfile = logfile_path.open('w') + self.logger.info('Standalone log file: %s', logfile_path) - def addr_str(address: (str, int)): - return ':'.join(map(str, address)) + logfile = logfile_path.open('w') command = ['java', '-jar', self.path, - '--listen', addr_str(listen_address), - '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://{addr_str(jmx_address)}/jmxrmi', - '--cql-address', addr_str(cql_address) + '--listen', listen_address, + '--jmx-service-url', f'service:jmx:rmi:///jndi/rmi://{jmx_address}/jmxrmi', + '--cql-address', cql_address ] + command = [str(v) for v in command] - logger.debug('Standalone exec(%s)', command) + self.logger.debug('Standalone exec(%s)', ' '.join(command)) return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT) \ No newline at end of file diff --git a/test/utils/net.py b/test/utils/net.py new file mode 100644 index 0000000..44aec4d --- /dev/null +++ b/test/utils/net.py @@ -0,0 +1,13 @@ +import typing + + +class SocketAddress(typing.NamedTuple): + host: str + port: int + + def __str__(self) -> str: + return f'{self.host}:{self.port}' + + +# def addr_str(address: (str, int)): +# return ':'.join(map(str, address)) \ No newline at end of file diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py index 0834663..d7bcf4a 100644 --- a/test/utils/prometheus.py +++ b/test/utils/prometheus.py @@ -1,3 +1,4 @@ +import http.client import json import platform import re @@ -10,8 +11,10 @@ from contextlib import contextmanager from enum import Enum, auto from pathlib import Path -from typing import List +from typing import List, NamedTuple, Optional, Union +from urllib.parse import urlparse +import appdirs import yaml from tqdm import tqdm @@ -38,12 +41,28 @@ def __getattr__(self, attr): return getattr(self._stream, attr) +class LocalPrometheusArchive(NamedTuple): + path: Path + + def extract(self, destination_directory: Path) -> Path: + archive_roots = set() + + with tarfile.open(self.path, mode='r') as archive: + for member in archive: + archive_roots.add(Path(member.name).parts[0]) + + archive.extract(member, 
destination_directory)
+
+        return destination_directory / next(iter(archive_roots))
+
+
+class RemotePrometheusArchive(NamedTuple):
+    url: str
 
-class PrometheusArchive(namedtuple('PrometheusArchive', ['url'])):
     logger = logging.getLogger(f'{__name__}.{__qualname__}')
 
     @classmethod
-    def default_prometheus_archive_url(cls):
+    def for_tag(cls, tag: str):
         def architecture_str():
             machine_aliases = {
                 'x86_64': 'amd64'
@@ -58,85 +77,132 @@ def architecture_str():
 
         asset_pattern = re.compile(r'prometheus-.+\.' + architecture_str() + '\.tar\..+')
 
-        with urllib.request.urlopen('https://api.github.com/repos/prometheus/prometheus/releases/latest') as response:
+        with urllib.request.urlopen(f'https://api.github.com/repos/prometheus/prometheus/releases/{tag}') as response:
             release_info = json.load(response)
 
         for asset in release_info['assets']:
             if asset_pattern.fullmatch(asset['name']) is not None:
-                return asset['browser_download_url']
+                return RemotePrometheusArchive(asset['browser_download_url'], )
+
+
+    # @classmethod
+    # def default_prometheus_archive_url(cls):
+    #     return cls.archive_url_for_tag('latest')
+
+    # @classmethod
+    # def add_archive_argument(cls, name, parser):
+    #     try:
+    #         default_url = PrometheusArchive.default_prometheus_archive_url()
+    #         default_help = '(default: %(default)s)'
+    #
+    #     except Exception as e:
+    #         cls.logger.warning('failed to determine Prometheus archive URL', exc_info=True)
+    #
+    #         default_url = None
+    #         default_help = f'(default: failed to determine archive URL)'
+    #
+    #     parser.add_argument(name, type=PrometheusArchive,
+    #                         help="Prometheus binary release archive (tar, tar+gz, tar+bzip2) URL (schemes: http, https, file) " + default_help,
+    #                         required=default_url is None,
+    #                         default=str(default_url))
+
+    @staticmethod
+    def default_download_cache_directory() -> Path:
+        return Path(appdirs.user_cache_dir('cassandra-exporter-e2e')) / 'prometheus'
+
+    def download(self, download_cache_directory: Path = None) -> LocalPrometheusArchive:
+        if download_cache_directory is None:
+            download_cache_directory = RemotePrometheusArchive.default_download_cache_directory()
+
+        url_parts = urlparse(self.url)
+        url_path = Path(url_parts.path)
+
+        destination = download_cache_directory / url_path.name
+        destination.parent.mkdir(parents=True, exist_ok=True)
+
+        if destination.exists():
+            return LocalPrometheusArchive(destination)
+
+        self.logger.info(f'Downloading {self.url} to {destination}...')
 
-    @classmethod
-    def add_archive_argument(cls, name, parser):
         try:
-            default_url = PrometheusArchive.default_prometheus_archive_url()
-            default_help = '(default: %(default)s)'
+            with tqdm(unit='bytes', unit_scale=True, miniters=1) as t:
+                def report(block_idx: int, block_size: int, file_size: int):
+                    if t.total is None:
+                        t.reset(file_size)
 
-        except Exception as e:
-            cls.logger.warning('failed to determine Prometheus archive URL', exc_info=True)
+                    t.update(block_size)
 
-            default_url = None
-            default_help = f'(default: failed to determine archive URL)'
+                urllib.request.urlretrieve(self.url, destination, report)
 
-            parser.add_argument(name, type=PrometheusArchive,
-                                help="Prometheus binary release archive (tar, tar+gz, tar+bzip2) URL (schemes: http, https, file) " + default_help,
-                                required=default_url is None,
-                                default=str(default_url))
+        except:
+            destination.unlink(missing_ok=True)  # don't leave half-downloaded files around
+            raise
 
-    def download(self, destination: Path):
-        print(f'Downloading {self.url} to {destination}...')
+        return LocalPrometheusArchive(destination)
 
-        archive_roots = set()
-
with urllib.request.urlopen(self.url) as response: - with tqdm(total=int(response.headers.get('Content-length')), unit='bytes', unit_scale=True, miniters=1) as t: - with tarfile.open(fileobj=_TqdmIOStream(response, t), mode='r|*') as archive: - for member in archive: - t.set_postfix(file=member.name) +def archive_from_path_or_url(purl: str) -> Union[LocalPrometheusArchive, RemotePrometheusArchive]: + url_parts = urlparse(purl) - archive_roots.add(Path(member.name).parts[0]) + if url_parts.netloc == '': + return LocalPrometheusArchive(Path(purl)) - archive.extract(member, destination) + return RemotePrometheusArchive(purl) - return destination / next(iter(archive_roots)) +class PrometheusInstance: + logger = logging.getLogger(f'{__name__}.{__qualname__}') -class PrometheusInstance(object): prometheus_directory: Path = None prometheus_process: subprocess.Popen = None - def __init__(self, archive: PrometheusArchive, working_directory: Path, listen_address='localhost:9090'): - self.prometheus_directory = archive.download(working_directory) + def __init__(self, archive: LocalPrometheusArchive, working_directory: Path, listen_address='localhost:9090'): + self.prometheus_directory = archive.extract(working_directory) self.listen_address = listen_address def start(self, wait=True): + logfile_path = self.prometheus_directory / 'prometheus.log' + logfile = logfile_path.open('w') + + self.logger.info('Starting Prometheus...') self.prometheus_process = subprocess.Popen( args=[str(self.prometheus_directory / 'prometheus'), f'--web.listen-address={self.listen_address}'], - cwd=str(self.prometheus_directory) + cwd=str(self.prometheus_directory), + stdout=logfile, + stderr=subprocess.STDOUT ) if wait: + self.logger.info('Waiting for Prometheus to become ready...') while not self.is_ready(): time.sleep(1) + self.logger.info('Prometheus started successfully') + def stop(self): + self.logger.info('Stopping Prometheus...') + if self.prometheus_process is not None: self.prometheus_process.terminate() + self.logger.info('Prometheus stopped successfully') + @contextmanager def _modify_config(self): config_file_path = self.prometheus_directory / 'prometheus.yml' with config_file_path.open('r+') as stream: - config = yaml.load(stream) + config = yaml.safe_load(stream) yield config stream.seek(0) - yaml.dump(config, stream) + yaml.safe_dump(config, stream) stream.truncate() - def set_scrape_config(self, job_name: str, static_targets: List[str]): + def set_static_scrape_config(self, job_name: str, static_targets: List[str]): with self._modify_config() as config: config['scrape_configs'] = [{ 'job_name': job_name, @@ -151,6 +217,9 @@ def is_ready(self): with urllib.request.urlopen(f'http://{self.listen_address}/-/ready') as response: return response.status == 200 + except urllib.error.HTTPError as e: + return False + except urllib.error.URLError as e: if isinstance(e.reason, ConnectionRefusedError): return False From bbdf426eb0ff33d7b12dbd54651457f1e515f738 Mon Sep 17 00:00:00 2001 From: Adam Zegelin Date: Wed, 7 Dec 2022 12:17:32 +1100 Subject: [PATCH 10/19] Prometheus now started with TLS to detect cases where Prometheus server is already running. Other WiP stuff. 
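
Each PrometheusInstance now generates a throwaway RSA key and a
self-signed certificate on construction, serves the admin API over
HTTPS (via a Prometheus web-config file), and performs its readiness
probes with an SSLContext whose only trust anchor is that certificate.
If some other server (e.g. a leftover Prometheus from an earlier run)
already owns the listen port, the probe fails TLS verification
immediately instead of the tests silently polling the wrong server.

A condensed sketch of the client side of this mechanism (the cert and
key generation mirrors setup_tls() below; the port and file path here
are illustrative):

    import ssl
    import tempfile
    import urllib.request
    from datetime import datetime, timedelta
    from pathlib import Path

    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    # throwaway key and self-signed certificate, valid for a day
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'localhost')])
    cert = (x509.CertificateBuilder()
            .subject_name(name).issuer_name(name)
            .public_key(key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(datetime.utcnow())
            .not_valid_after(datetime.utcnow() + timedelta(days=1))
            .add_extension(x509.SubjectAlternativeName([x509.DNSName(u'localhost')]),
                           critical=False)
            .sign(key, hashes.SHA256()))

    cert_path = Path(tempfile.mkdtemp()) / 'tls_cert.pem'
    cert_path.write_bytes(cert.public_bytes(serialization.Encoding.PEM))

    # a client context that trusts *only* the certificate generated above
    ctx = ssl.SSLContext()
    ctx.load_verify_locations(str(cert_path))
    ctx.verify_mode = ssl.VerifyMode.CERT_REQUIRED

    # succeeds only against the instance serving this exact certificate;
    # anything else fails with "certificate verify failed"
    with urllib.request.urlopen('https://localhost:9090/-/ready', context=ctx) as r:
        print(r.status)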
--- test/pyproject.toml | 11 ++ test/test_tool.py | 51 ++++----- test/utils/prometheus.py | 202 +++++++++++++++++++++++---------- test/utils/prometheus_tests.py | 163 ++++++++++++++++++++++++++ 4 files changed, 340 insertions(+), 87 deletions(-) create mode 100644 test/pyproject.toml create mode 100644 test/utils/prometheus_tests.py diff --git a/test/pyproject.toml b/test/pyproject.toml new file mode 100644 index 0000000..eec228f --- /dev/null +++ b/test/pyproject.toml @@ -0,0 +1,11 @@ +[build-system] +requires = ["setuptools", "setuptools-scm"] + +[project] +dependencies = [ + 'click', + 'cloup', + 'ccm', + 'appdirs', + 'cryptography' +] \ No newline at end of file diff --git a/test/test_tool.py b/test/test_tool.py index 1580c11..86cf3d5 100644 --- a/test/test_tool.py +++ b/test/test_tool.py @@ -4,6 +4,7 @@ import os import sys import tarfile +import time import typing as t from contextlib import contextmanager import shutil @@ -60,10 +61,6 @@ def fixup_kwargs(*skip: str): kwargs[a] = v - #kwargs.update(overrides) - - pass - def with_working_directory(): def decorator(func: t.Callable) -> t.Callable: @@ -221,7 +218,7 @@ def decorator(func: t.Callable) -> t.Callable: cloup.option('--prometheus-archive', metavar='PATH/URL'), constraint=cloup.constraints.mutually_exclusive ) - #@click.option('--prometheus-version', 'prometheus_version', default='test-cluster', show_default=True) + @click.pass_context @wraps(func) def wrapper(ctx: click.Context, @@ -353,27 +350,29 @@ def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance, **kwargs): input("Press any key to stop cluster...") - # while True: - # targets = prometheus.get_targets() - # - # if len(targets['activeTargets']) > 0: - # for target in targets['activeTargets']: - # labels = frozendict(target['labels']) - # - # # even if the target health is unknown, ensure the key exists so the length check below - # # is aware of the target - # history = target_histories[labels] - # - # if target['health'] == 'unknown': - # continue - # - # history[target['lastScrape']] = (target['health'], target['lastError']) - # - # if all([len(v) >= 5 for v in target_histories.values()]): - # break - # - # time.sleep(1) - # + while True: + targets = prometheus.api.get_targets() + + pass + + # if len(targets['activeTargets']) > 0: + # for target in targets['activeTargets']: + # labels = frozendict(target['labels']) + # + # # even if the target health is unknown, ensure the key exists so the length check below + # # is aware of the target + # history = target_histories[labels] + # + # if target['health'] == 'unknown': + # continue + # + # history[target['lastScrape']] = (target['health'], target['lastError']) + # + # if all([len(v) >= 5 for v in target_histories.values()]): + # break + + time.sleep(1) + # unhealthy_targets = dict((target, history) for target, history in target_histories.items() # if any([health != 'up' for (health, error) in history.values()])) # diff --git a/test/utils/prometheus.py b/test/utils/prometheus.py index d7bcf4a..a7827e2 100644 --- a/test/utils/prometheus.py +++ b/test/utils/prometheus.py @@ -2,24 +2,33 @@ import json import platform import re +import signal +import ssl import subprocess import tarfile import time import urllib.request import urllib.error -from collections import namedtuple from contextlib import contextmanager -from enum import Enum, auto +from datetime import datetime, timedelta from pathlib import Path from typing import List, NamedTuple, Optional, Union from urllib.parse import urlparse import appdirs +from 
cryptography import x509 +from cryptography.x509.oid import NameOID +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives import serialization import yaml from tqdm import tqdm import logging +from utils.net import SocketAddress + + class _TqdmIOStream(object): def __init__(self, stream, t): self._stream = stream @@ -85,27 +94,6 @@ def architecture_str(): return RemotePrometheusArchive(asset['browser_download_url'], ) - # @classmethod - # def default_prometheus_archive_url(cls): - # return cls.archive_url_for_tag('latest') - - # @classmethod - # def add_archive_argument(cls, name, parser): - # try: - # default_url = PrometheusArchive.default_prometheus_archive_url() - # default_help = '(default: %(default)s)' - # - # except Exception as e: - # cls.logger.warning('failed to determine Prometheus archive URL', exc_info=True) - # - # default_url = None - # default_help = f'(default: failed to determine archive URL)' - # - # parser.add_argument(name, type=PrometheusArchive, - # help="Prometheus binary release archive (tar, tar+gz, tar+bzip2) URL (schemes: http, https, file) " + default_help, - # required=default_url is None, - # default=str(default_url)) - @staticmethod def default_download_cache_directory() -> Path: return Path(appdirs.user_cache_dir('cassandra-exporter-e2e')) / 'prometheus' @@ -151,47 +139,147 @@ def archive_from_path_or_url(purl: str) -> Union[LocalPrometheusArchive, RemoteP return RemotePrometheusArchive(purl) +class PrometheusApi: + def __init__(self, address: SocketAddress, ssl_context: ssl.SSLContext): + self.address = address + self.ssl_context = ssl_context + + def _api_call(self, path): + with urllib.request.urlopen(f'https://{self.address}{path}', context=self.ssl_context) as response: + response_envelope = json.load(response) + + if response_envelope['status'] != 'success': + raise Exception(response.url, response.status, response_envelope) + + return response_envelope['data'] + + def get_targets(self): + return self._api_call('/api/v1/targets') + + def query(self, q): + return self._api_call(f'/api/v1/query?query={q}') + + class PrometheusInstance: logger = logging.getLogger(f'{__name__}.{__qualname__}') - prometheus_directory: Path = None - prometheus_process: subprocess.Popen = None + listen_address: SocketAddress + directory: Path = None + process: subprocess.Popen = None + + tls_key_path: Path + tls_cert_path: Path + ssl_context: ssl.SSLContext + + api: PrometheusApi - def __init__(self, archive: LocalPrometheusArchive, working_directory: Path, listen_address='localhost:9090'): - self.prometheus_directory = archive.extract(working_directory) + def __init__(self, archive: LocalPrometheusArchive, working_directory: Path, + listen_address: SocketAddress = SocketAddress('localhost', 9090)): + self.directory = archive.extract(working_directory) self.listen_address = listen_address + self.setup_tls() + + self.api = PrometheusApi(listen_address, self.ssl_context) + + def setup_tls(self): + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048 + ) + + self.tls_key_path = (self.directory / 'tls_key.pem') + with self.tls_key_path.open('wb') as f: + f.write(private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + )) + + subject = issuer = x509.Name([ + x509.NameAttribute(NameOID.COUNTRY_NAME, u"AU"), + 
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Australian Capital Territory"), + x509.NameAttribute(NameOID.LOCALITY_NAME, u"Canberra"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Instaclustr Pty Ltd"), + x509.NameAttribute(NameOID.COMMON_NAME, u"Temporary Prometheus Server Certificate"), + ]) + + cert = x509.CertificateBuilder().subject_name( + subject + ).issuer_name( + issuer + ).public_key( + private_key.public_key() + ).serial_number( + x509.random_serial_number() + ).not_valid_before( + datetime.utcnow() + ).not_valid_after( + # certificate will be valid for a day + datetime.utcnow() + timedelta(days=1) + ).add_extension( + x509.SubjectAlternativeName([x509.DNSName(self.listen_address.host)]), + critical=False, + ).sign(private_key, hashes.SHA256()) # Sign certificate with private key + + self.tls_cert_path = (self.directory / 'tls_cert.pem') + with self.tls_cert_path.open('wb') as f: + f.write(cert.public_bytes(serialization.Encoding.PEM)) + + self.ssl_context = ssl.SSLContext() + self.ssl_context.load_verify_locations(self.tls_cert_path) + self.ssl_context.verify_mode = ssl.VerifyMode.CERT_REQUIRED + def start(self, wait=True): - logfile_path = self.prometheus_directory / 'prometheus.log' + web_config_path = (self.directory / 'web-config.yaml') + with web_config_path.open('w') as f: + config = { + 'tls_server_config': { + 'cert_file': str(self.tls_cert_path), + 'key_file': str(self.tls_key_path) + } + } + + yaml.safe_dump(config, f) + + logfile_path = self.directory / 'prometheus.log' logfile = logfile_path.open('w') self.logger.info('Starting Prometheus...') - self.prometheus_process = subprocess.Popen( - args=[str(self.prometheus_directory / 'prometheus'), + self.process = subprocess.Popen( + args=[str(self.directory / 'prometheus'), + f'--web.config.file={web_config_path}', f'--web.listen-address={self.listen_address}'], - cwd=str(self.prometheus_directory), + cwd=str(self.directory), stdout=logfile, stderr=subprocess.STDOUT ) if wait: - self.logger.info('Waiting for Prometheus to become ready...') - while not self.is_ready(): - time.sleep(1) + self.wait_ready() self.logger.info('Prometheus started successfully') def stop(self): self.logger.info('Stopping Prometheus...') - if self.prometheus_process is not None: - self.prometheus_process.terminate() + if self.process is not None: + self.process.terminate() self.logger.info('Prometheus stopped successfully') + def wait_ready(self): + self.logger.info('Waiting for Prometheus to become ready...') + while not self.is_ready(): + rc = self.process.poll() + if rc is not None: + raise Exception(f'Prometheus process {self.process.pid} exited unexpectedly with rc {rc} while waiting for ready state!') + + time.sleep(1) + @contextmanager def _modify_config(self): - config_file_path = self.prometheus_directory / 'prometheus.yml' + config_file_path = self.directory / 'prometheus.yml' with config_file_path.open('r+') as stream: config = yaml.safe_load(stream) @@ -199,47 +287,42 @@ def _modify_config(self): yield config stream.seek(0) - yaml.safe_dump(config, stream) stream.truncate() - def set_static_scrape_config(self, job_name: str, static_targets: List[str]): + yaml.safe_dump(config, stream) + + if self.process is not None: + self.process.send_signal(signal.SIGHUP) + self.wait_ready() + + def set_static_scrape_config(self, job_name: str, static_targets: List[Union[str, SocketAddress]]): with self._modify_config() as config: config['scrape_configs'] = [{ 'job_name': job_name, 'scrape_interval': '10s', 'static_configs': [{ - 
'targets': static_targets + 'targets': [str(t) for t in static_targets] }] }] def is_ready(self): try: - with urllib.request.urlopen(f'http://{self.listen_address}/-/ready') as response: + with urllib.request.urlopen(f'https://{self.listen_address}/-/ready', context=self.ssl_context) as response: return response.status == 200 except urllib.error.HTTPError as e: + self.logger.debug('HTTP error while checking for ready state: %s', e) return False except urllib.error.URLError as e: + self.logger.debug('urllib error while checking for ready state: %s', e) if isinstance(e.reason, ConnectionRefusedError): return False - raise e - - def _api_call(self, path): - with urllib.request.urlopen(f'http://{self.listen_address}{path}') as response: - response_envelope = json.load(response) - - if response_envelope['status'] != 'success': - raise Exception(response.url, response.status, response_envelope) - - return response_envelope['data'] - - def get_targets(self): - return self._api_call('/api/v1/targets') + if isinstance(e.reason, ssl.SSLError): + self.logger.warning('SSL/TLS errors may mean that an instance of Prometheus (or some other server) is already listening on %s. Check the port.', self.listen_address) - def query(self, q): - return self._api_call(f'/api/v1/query?query={q}') + raise e def __enter__(self): return self @@ -247,8 +330,5 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): self.stop() - if self.prometheus_process is not None: - self.prometheus_process.__exit__(exc_type, exc_val, exc_tb) - - - + if self.process is not None: + self.process.__exit__(exc_type, exc_val, exc_tb) diff --git a/test/utils/prometheus_tests.py b/test/utils/prometheus_tests.py new file mode 100644 index 0000000..218095b --- /dev/null +++ b/test/utils/prometheus_tests.py @@ -0,0 +1,163 @@ +import contextlib +import http.server +import logging +import random +import socketserver +import tempfile +import threading +import time +import typing +import unittest +from collections import defaultdict +from datetime import datetime +from enum import Enum, auto +from functools import partial +from pathlib import Path +from typing import Dict + +from frozendict import frozendict + +from utils.net import SocketAddress +from utils.prometheus import PrometheusInstance, RemotePrometheusArchive + + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(f'{__name__}') + + +ENDPOINT_ADDRESS = SocketAddress('localhost', 9500) + + +class EndpointMode(Enum): + RETURN_VALID_RESPONSE = auto() + RETURN_INVALID_RESPONSE = auto() + + +class TestMetricsHTTPHandler(http.server.BaseHTTPRequestHandler): + """A test HTTP endpoint for Prometheus to scrape.""" + + mode: EndpointMode + + def __init__(self, mode: EndpointMode, *args) -> None: + self.mode = mode + super().__init__(*args) + + def do_GET(self): + if self.path != '/metrics': + self.send_error(404) + + self.send_response(200) + self.end_headers() + + if self.mode == EndpointMode.RETURN_VALID_RESPONSE: + self.wfile.write(b'# TYPE test_family gauge\n' + b'test_family 123\n') + + elif self.mode == EndpointMode.RETURN_INVALID_RESPONSE: + self.wfile.write(b'# TYPE test_family gauge\n' + b'test_family123\n') + + else: + raise NotImplementedError(f'unknown mode {self.mode}') + + + + +# class TargetScrapeStatus(typing.NamedTuple): +# health: str +# lastError: str +# +# +# TargetsScrapeHistory = Dict[str, Dict[str, TargetScrapeStatus]] +# +# +# def collect_target_scrape_history(min_scrapes: int = 5) -> TargetsScrapeHistory: +# target_histories = 
defaultdict(dict) +# +# while True: +# targets = prometheus.api.get_targets() +# print(targets) +# +# for target in targets['activeTargets']: +# labels = frozendict(target['labels']) +# +# history = target_histories[labels] +# +# if target['health'] == 'unknown': +# # hasn't been scraped yet +# continue +# +# ts = target['lastScrape'] +# history[ts] = TargetScrapeStatus(target['health'], target['lastError']) +# +# # collect min_scrapes or more scrape statuses for each target +# if len(target_histories) > 0 and all([len(v) >= min_scrapes for v in target_histories.values()]): +# break +# +# time.sleep(1) +# +# return target_histories +# +# +# def is_target_healthy(target: str, scrape_history: TargetsScrapeHistory) -> bool: +# target_history = scrape_history[target] +# +# return len(target_history) and all([h.health == 'up' for h in target_history.values()]) + + + +# assert run_test(EndpointMode.RETURN_VALID_RESPONSE) is True +# assert run_test(EndpointMode.RETURN_INVALID_RESPONSE) is False + + +class TestMetricsHandlerTest(unittest.TestCase): + def test(self): + cm = contextlib.ExitStack() + + work_dir = Path(cm.enter_context(tempfile.TemporaryDirectory())) + + archive = RemotePrometheusArchive.for_tag('latest').download() + prometheus: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir)) + + prometheus.start() + + + def run_test(mode: EndpointMode): + httpd = http.server.HTTPServer(ENDPOINT_ADDRESS, partial(TestMetricsHTTPHandler, mode)) + thread = threading.Thread(target=httpd.serve_forever, daemon=True) + + thread.start() + + try: + pass + # prometheus.set_static_scrape_config('test', [ENDPOINT_ADDRESS]) + # + # history = collect_target_scrape_history() + # print(history) + # return is_target_healthy('test', history) + + finally: + httpd.shutdown() + thread.join() + + +class ConcurrentPrometheusInstancesTest(unittest.TestCase): + def test_concurrent_instances(self): + cm = contextlib.ExitStack() # TODO: clean this up + + work_dir1 = Path(cm.enter_context(tempfile.TemporaryDirectory())) # TODO: make these delete only if no exception occured + work_dir2 = Path(cm.enter_context(tempfile.TemporaryDirectory())) + + archive = RemotePrometheusArchive.for_tag('latest').download() + prometheus1: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir1)) + prometheus2: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir2)) + + prometheus1.start() + + with self.assertRaisesRegex(Exception, 'certificate verify failed'): + prometheus2.start() + + + + +pass \ No newline at end of file From 640f8ffa49fe1a796cf6cc29cf988d169e47a1e8 Mon Sep 17 00:00:00 2001 From: Adam Zegelin Date: Fri, 9 Dec 2022 17:52:32 +1100 Subject: [PATCH 11/19] test code refactoring, cleanup, added new features, etc. 
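
Summary of the restructure: the shared helpers move from test/utils/ to
test/lib/, the superseded standalone scripts are parked under test/old/,
and the dump capture/validation logic starts moving into
test/tools/dump.py. Commands are now assembled from stackable
decorators: each decorator contributes a cloup option group, constructs
the resource it owns, and forwards the remaining CLI options to the
wrapped function via fixup_kwargs().

Roughly, a command is composed like this (a sketch only; the command
body is illustrative):

    import cloup

    from lib.ccm import with_ccm_cluster
    from lib.click_helpers import with_working_directory

    @cloup.group()
    def cli():
        pass

    @cli.command('demo')
    @with_working_directory()  # adds the "Working Directory" options, yields a Path
    @with_ccm_cluster()        # adds the "Cassandra" options, builds a TestCluster
    def demo(ccm_cluster, **kwargs):
        ccm_cluster.start()

    if __name__ == '__main__':
        cli()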
---
 test/lib/__pycache__/ccm.cpython-39.pyc       | Bin 0 -> 7522 bytes
 .../__pycache__/click_helpers.cpython-39.pyc  | Bin 0 -> 6225 bytes
 test/lib/__pycache__/dump.cpython-39.pyc      | Bin 0 -> 3543 bytes
 .../lib/__pycache__/dump_tests.cpython-39.pyc | Bin 0 -> 2967 bytes
 test/lib/__pycache__/jar_utils.cpython-39.pyc | Bin 0 -> 4400 bytes
 test/lib/__pycache__/net.cpython-39.pyc       | Bin 0 -> 564 bytes
 .../lib/__pycache__/path_utils.cpython-39.pyc | Bin 0 -> 798 bytes
 .../lib/__pycache__/prometheus.cpython-39.pyc | Bin 0 -> 13412 bytes
 .../prometheus_tests.cpython-39.pyc           | Bin 0 -> 4001 bytes
 test/lib/__pycache__/schema.cpython-39.pyc    | Bin 0 -> 2063 bytes
 test/{utils => lib}/ccm.py                    |  92 ++++-
 test/lib/click_helpers.py                     | 150 +++++++++
 test/lib/dump.py                              |  77 +++++
 test/lib/dump_tests.py                        | 115 +++++++
 test/lib/experiment.py                        |  79 +++++
 test/{utils => lib}/jar_utils.py              |  30 +-
 test/{utils => lib}/net.py                    |   4 -
 test/{utils => lib}/path_utils.py             |   5 +-
 test/{utils => lib}/prometheus.py             |  71 +++-
 test/{utils => lib}/prometheus_tests.py       |  11 +-
 test/{utils => lib}/schema.py                 |  17 +-
 test/{ => old}/capture_dump.py                |   6 +-
 test/{ => old}/create_demo_cluster.py         |  12 +-
 test/{ => old}/debug_agent.py                 |   2 +-
 test/{ => old}/e2e_test.py                    |  38 +--
 test/{ => old}/e2e_test_tests.py              |   0
 test/{ => old}/metric_dump_tool.py            |   0
 test/{ => old}/metric_dump_tool_tests.py      |   1 -
 test/test_tool.py                             | 313 ++----------------
 test/tools/__pycache__/dump.cpython-39.pyc    | Bin 0 -> 3037 bytes
 test/tools/dump.py                            | 105 ++++++
 test/validate_metrics.py                      |   1 -
 32 files changed, 756 insertions(+), 373 deletions(-)
 create mode 100644 test/lib/__pycache__/ccm.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/click_helpers.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/dump.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/dump_tests.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/jar_utils.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/net.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/path_utils.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/prometheus.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/prometheus_tests.cpython-39.pyc
 create mode 100644 test/lib/__pycache__/schema.cpython-39.pyc
 rename test/{utils => lib}/ccm.py (56%)
 create mode 100644 test/lib/click_helpers.py
 create mode 100644 test/lib/dump.py
 create mode 100644 test/lib/dump_tests.py
 create mode 100644 test/lib/experiment.py
 rename test/{utils => lib}/jar_utils.py (78%)
 rename test/{utils => lib}/net.py (66%)
 rename test/{utils => lib}/path_utils.py (78%)
 rename test/{utils => lib}/prometheus.py (83%)
 rename test/{utils => lib}/prometheus_tests.py (91%)
 rename test/{utils => lib}/schema.py (69%)
 rename test/{ => old}/capture_dump.py (97%)
 rename test/{ => old}/create_demo_cluster.py (90%)
 rename test/{ => old}/debug_agent.py (98%)
 rename test/{ => old}/e2e_test.py (79%)
 rename test/{ => old}/e2e_test_tests.py (100%)
 rename test/{ => old}/metric_dump_tool.py (100%)
 rename test/{ => old}/metric_dump_tool_tests.py (99%)
 create mode 100644 test/tools/__pycache__/dump.cpython-39.pyc
 create mode 100644 test/tools/dump.py
 delete mode 100644 test/validate_metrics.py

[GIT binary patch data for the committed test/lib/__pycache__/*.pyc files elided: compiled Python bytecode, not human-readable.]

diff --git a/test/utils/ccm.py
b/test/lib/ccm.py similarity index 56% rename from test/utils/ccm.py rename to test/lib/ccm.py index bdb1caa..1d41a03 100644 --- a/test/utils/ccm.py +++ b/test/lib/ccm.py @@ -1,21 +1,27 @@ -import shutil import signal import subprocess -import time +import typing as t +from functools import wraps from pathlib import Path from typing import List, Optional +import click +import cloup from ccmlib.cluster import Cluster +from ccmlib.common import check_socket_listening -from utils.jar_utils import ExporterJar -from utils.net import SocketAddress -from utils.schema import CqlSchema +from lib.click_helpers import fixup_kwargs, ppstrlist +from lib.jar_utils import ExporterJar, ExporterJarParamType +from lib.net import SocketAddress +from lib.schema import CqlSchema, CqlSchemaParamType import cassandra.cluster import cassandra.connection import logging +logger = logging.getLogger('ccm') + class TestCluster(Cluster): logger = logging.getLogger(f'{__name__}.{__qualname__}') @@ -50,6 +56,9 @@ def populate(self, nodes: int, racks: int = 1, datacenters: int = 1, for i, node in enumerate(self.nodelist()): node.exporter_address = SocketAddress(node.ip_addr, 9500 + i) + node.rack = f'rack-{(int(i / nodes) % racks) + 1}' + node.data_center = f'dc-{(int(i / nodes * racks)) + 1}' + if self.exporter_jar.type == ExporterJar.ExporterType.AGENT: node.set_environment_variable('JVM_OPTS', f'-javaagent:{self.exporter_jar.path}=-l{node.exporter_address}') @@ -58,14 +67,9 @@ def populate(self, nodes: int, racks: int = 1, datacenters: int = 1, 'endpoint_snitch': 'GossipingPropertyFileSnitch' }) - rackdc_path = Path(node.get_conf_dir()) / 'cassandra-rackdc.properties' - - node.rack_idx = (int(i / nodes) % racks) + 1 - node.dc_idx = (int(i / nodes * racks)) + 1 - - with open(rackdc_path, 'w') as f: - print(f'dc=dc{node.dc_idx}', file=f) - print(f'rack=rack{node.rack_idx}', file=f) + with (Path(node.get_conf_dir()) / 'cassandra-rackdc.properties').open('w') as f: + print(f'dc={node.data_center}', file=f) + print(f'rack={node.rack}', file=f) return result @@ -77,10 +81,6 @@ def start(self, verbose=False, wait_for_binary_proto=True, wait_other_notice=Tru quiet_start, allow_root, **kwargs) self.logger.info('Cassandra cluster started successfully') - if self.initial_schema: - self.logger.info('Applying initial CQL schema...') - self.apply_schema(self.initial_schema) - # start the standalone exporters, if requested if self.exporter_jar.type == ExporterJar.ExporterType.STANDALONE: for node in self.nodelist(): @@ -97,6 +97,14 @@ def start(self, verbose=False, wait_for_binary_proto=True, wait_other_notice=Tru self.logger.info('Standalone cassandra-exporters started successfully') + if self.initial_schema: + self.logger.info('Applying initial CQL schema...') + self.apply_schema(self.initial_schema) + + # wait for the exporters to accept connections + for node in self.nodelist(): + check_socket_listening(node.exporter_address) + return result def stop(self, wait=True, signal_event=signal.SIGTERM, **kwargs): @@ -136,3 +144,53 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): self.stop() + +def with_ccm_cluster(): + def decorator(func: t.Callable) -> t.Callable: + jar_types = [type.name.lower() for type in ExporterJar.ExporterType] + + @cloup.option_group( + "Cassandra", + cloup.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True, + help='name of the Cassandra cluster'), + cloup.option('--cassandra-version', default='3.11.14', show_default=True, + help='Cassandra version 
to run'), + cloup.option('--topology', 'cassandra_topology', + type=(int, int, int), default=(2, 3, 1), show_default=True, + metavar='DCS RACKS NODES', help='number of data centers, racks per data center, and nodes per rack.'), + cloup.option('-j', '--exporter-jar', default='agent', show_default=True, type=ExporterJarParamType(), + help=f'path of the cassandra-exporter jar to use, either {ppstrlist(jar_types)} builds, ' + f'or one of {ppstrlist(jar_types, quote=True)} for the currently built jar of that type in the project directory ' + f'(assumes that the sources for this test tool are in the standard location within the project, and that the jar(s) have been built).'), + cloup.option('-s', '--schema', 'cql_schema', default=CqlSchema.default_schema_path(), show_default=True, type=CqlSchemaParamType(), + help='path of the CQL schema YAML file to apply on cluster start. The YAML file must contain a list of CQL statement strings, which are applied in order.') + ) + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, + cassandra_version: str, cassandra_cluster_name: str, cassandra_topology: t.Tuple[int, int, int], + exporter_jar: ExporterJar, + cql_schema: t.Optional[CqlSchema], + working_directory: Path, **kwargs): + + datacenters, racks, nodes, = cassandra_topology + + logger.info('Creating Cassandra %s cluster, with:', cassandra_version) + logger.info(' Topology: %s data center(s), %s rack(s) per DC, %s node(s) per rack (%s node(s) total)', datacenters, racks, nodes, (nodes * racks * datacenters)) + logger.info(' cassandra-exporter: %s', exporter_jar) + + ccm_cluster = ctx.with_resource(TestCluster( + cluster_directory=(working_directory / cassandra_cluster_name), + cassandra_version=cassandra_version, + nodes=nodes*racks*datacenters, racks=racks, datacenters=datacenters, + exporter_jar=exporter_jar, + initial_schema=cql_schema + )) + + fixup_kwargs() + + func(ccm_cluster=ccm_cluster, **kwargs) + + return wrapper + + return decorator diff --git a/test/lib/click_helpers.py b/test/lib/click_helpers.py new file mode 100644 index 0000000..32c717f --- /dev/null +++ b/test/lib/click_helpers.py @@ -0,0 +1,150 @@ +import inspect +import shutil +import tempfile +import typing as t +from enum import Enum +from functools import wraps +from itertools import chain +from pathlib import Path + +import click +import cloup + +from lib.path_utils import nonexistent_or_empty_directory_arg + + +def fixup_kwargs(*skip: str): + """ + inspect the caller's frame, grab any arguments and shove them back into kwargs + + this is useful when the caller is a wrapper and wants to pass on the majority its arguments to the wrapped function + """ + + caller_frame = inspect.stack()[1].frame + args, _, kwvar, values = inspect.getargvalues(caller_frame) + + args: t.List[str] = [a for a in args if a not in skip] + + kwargs: t.Dict[str, t.Any] = values[kwvar] + + for a in args: + v = values[a] + if isinstance(v, click.Context): + continue + + kwargs[a] = v + + +def ppstrlist(sl: t.List[t.Any], conj: str = 'or', quote: bool = False): + joins = [', '] * len(sl) + joins += [f' {conj} ', ''] + + joins = joins[-len(sl):] + + if quote: + sl = [f'"{s}"' for s in sl] + + return ''.join(chain.from_iterable(zip(sl, joins))) + + + +class DictChoice(click.Choice): + """like Choice except takes a Dict[str, Any]. + + The choices are the string keys of the dict. 
+ convert() returns the value for the chosen key.""" + + dict_choices: t.Dict[str, t.Any] + + def __init__(self, choices: t.Dict[str, t.Any], case_sensitive: bool = True) -> None: + self.dict_choices = choices + super().__init__(list(choices.keys()), case_sensitive) + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> t.Any: + return self.dict_choices[super().convert(value, param, ctx)] + + +class WorkingDirectory: + class CleanupMode(Enum): + KEEP_ON_ERROR = (True, False) + KEEP_ALWAYS = (False, False) + DELETE_ALWAYS = (True, True) + + def __init__(self, delete_normally: bool, delete_on_exception: bool): + self.delete_normally = delete_normally + self.delete_on_exception = delete_on_exception + + def should_delete(self, has_exception: bool) -> bool: + return self.delete_on_exception if has_exception else self.delete_normally + + def __init__(self, cleanup_mode: CleanupMode, directory: t.Optional[Path] = None): + self.cleanup_mode = cleanup_mode + self.directory = directory + + def __enter__(self) -> Path: + if self.directory is None: + self.directory = Path(tempfile.mkdtemp()) + + self.directory.mkdir(exist_ok=True) + + return self.directory + + def __exit__(self, exc_type, exc_val, exc_tb) -> bool: + has_e = exc_type is not None + + if self.cleanup_mode.should_delete(has_e): + shutil.rmtree(self.directory) + + return False + + +class DirectoryPathType(click.Path): + def __init__(self, empty: bool = False): + super().__init__(path_type=Path) + self.empty = empty + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> t.Any: + path: Path = super().convert(value, param, ctx) + + if path.exists(): + if not path.is_dir(): + self.fail(f'{path}: must be a directory', param, ctx) + + if self.empty and next(path.iterdir(), None) is not None: + self.fail(f'{path}: must be an empty directory', param, ctx) + + return path + + +def with_working_directory(): + keep_option_choices = { + 'on-error': WorkingDirectory.CleanupMode.KEEP_ON_ERROR, + 'always': WorkingDirectory.CleanupMode.KEEP_ALWAYS, + 'never': WorkingDirectory.CleanupMode.DELETE_ALWAYS + } + + def decorator(func: t.Callable) -> t.Callable: + @cloup.option_group( + "Working Directory", + cloup.option('-C', '--working-directory', type=DirectoryPathType(empty=True), + help='location to install Cassandra and/or Prometheus. Must be empty or not exist. 
Defaults to a temporary directory.'), + cloup.option('--cleanup-working-directory', type=DictChoice(keep_option_choices, case_sensitive=False), + default='on-error', show_default=True, + help='how to delete the working directory on exit: ' + '"on-error": delete working directory on exit unless an error occurs, ' + '"always": always delete working directory on exit, ' + '"never": never delete working directory.') + ) + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, working_directory: Path, + cleanup_working_directory: WorkingDirectory.CleanupMode, **kwargs): + working_directory = ctx.with_resource(WorkingDirectory(cleanup_working_directory, working_directory)) + + fixup_kwargs() + + func(**kwargs) + + return wrapper + + return decorator diff --git a/test/lib/dump.py b/test/lib/dump.py new file mode 100644 index 0000000..da88e16 --- /dev/null +++ b/test/lib/dump.py @@ -0,0 +1,77 @@ +import itertools +from pathlib import Path +from typing import NamedTuple, Any, Union, Iterable, List + +import io + +from frozendict import frozendict +from prometheus_client import Metric +from prometheus_client.parser import text_fd_to_metric_families +import prometheus_client.samples + + +class ValidationResult(NamedTuple): + untyped_families: Any + duplicate_families: Any + duplicate_samples: Any + + # = namedtuple('ValidationResult', ['duplicate_families', 'duplicate_samples']) +#DiffResult = namedtuple('DiffResult', ['added_families', 'removed_families', 'added_samples', 'removed_samples']) + + +class MetricsDump(NamedTuple): + path: Union[str, Path] + metric_families: List[Metric] + + @classmethod + def from_file(cls, path: Path) -> 'MetricsDump': + with open(path, 'rt', encoding='utf-8') as fd: + return MetricsDump.from_lines(fd) + + @classmethod + def from_str(cls, s: str) -> 'MetricsDump': + with io.StringIO(s) as fd: + return MetricsDump.from_lines(fd) + + @classmethod + def from_lines(cls, lines: Iterable[str]) -> 'MetricsDump': + def parse_lines(): + for family in text_fd_to_metric_families(lines): + # freeze the labels dict so its hashable and the keys can be used as a set + #family.samples = [sample._replace(labels=frozendict(sample.labels)) for sample in family.samples] + + yield family + + metric_families = list(parse_lines()) + + path = '' + if isinstance(lines, io.BufferedReader): + path = lines.name + + return MetricsDump(path, metric_families) + + def validate(self) -> ValidationResult: + def find_duplicate_families(): + def family_name_key_fn(f): + return f.name + + families = sorted(self.metric_families, key=family_name_key_fn) # sort by name + family_groups = itertools.groupby(families, key=family_name_key_fn) # group by name + family_groups = [(k, list(group)) for k, group in family_groups] # convert groups to lists + + return {name: group for name, group in family_groups if len(group) > 1} + + def find_duplicate_samples(): + samples = itertools.chain(family.samples for family in self.metric_families) + #sample_groups = + + return + + + return ValidationResult( + duplicate_families=find_duplicate_families(), + duplicate_samples=find_duplicate_samples() + ) + + def diff(self, other: 'MetricsDump'): + pass \ No newline at end of file diff --git a/test/lib/dump_tests.py b/test/lib/dump_tests.py new file mode 100644 index 0000000..159bb95 --- /dev/null +++ b/test/lib/dump_tests.py @@ -0,0 +1,115 @@ +import unittest + +from lib.dump import MetricsDump + + +class Tests(unittest.TestCase): + def test(self): + dump1 = MetricsDump.from_str(""" +# the following are duplicate 
families +test_family_d {abc="123"} 0 0 +test_family_d {abc="456"} 0 0 +""") + + dump2 = MetricsDump.from_str(""" +# the following are duplicate families +# TYPE test_family_d counter +test_family_d {abc="123"} 0 0 +test_family_d {abc="456"} 0 0 +""") + + pass + + +class ValidationTests(unittest.TestCase): + # def test_invalid_input(self): + # """ + # Test the + # """ + # data = """ + # busted busted busted + # """ + # + # with self.assertRaises(ValueError): + # metric_dump_tool.MetricsDump.from_lines(data) + + def test_duplicate_families(self): + """ + Test that validation finds duplicated metric families + """ + dump = MetricsDump.from_str(""" +# TYPE test_family_a counter +test_family_a {} 1234 1234 + +test_family_b {} 0 0 + +# TYPE test_family_a gauge +test_family_a {} 5678 1234 + +# the following are duplicate samples, not duplicate families +# TYPE test_family_c gauge +test_family_c {} 1234 1234 +test_family_c {} 1234 1234 + +# the following are duplicate families +test_family_d {abc="123"} 0 0 +test_family_d {abc="456"} 0 0 + """) + + result = dump.validate() + + self.assertIn('test_family_a', result.duplicate_families) + self.assertIn('test_family_d', result.duplicate_families) + self.assertNotIn('test_family_b', result.duplicate_families) + self.assertNotIn('test_family_c', result.duplicate_families) + + def test_duplicate_samples(self): + """ + Test that validation finds duplicated metric families + """ + dump = MetricsDump.from_lines(""" +# TYPE test_family_a gauge +test_family_a {hello="world"} 1234 1234 +test_family_a {hello="world"} 1234 1234 + """) + + result = dump.validate() + + self.assertIn('test_family_a', result.duplicate_families) + self.assertNotIn('test_family_b', result.duplicate_families) + + +class DiffTests(unittest.TestCase): + def test_added_families(self): + from_dump = MetricsDump.from_lines(""" +test_family_a {hello="world"} 0 0 + """) + + to_dump = MetricsDump.from_lines(""" +test_family_a {hello="world"} 0 0 +test_family_a {hello="universe"} 0 0 + +test_family_b {} 0 0 + """) + + result = from_dump.diff(to_dump) + + self.assertIn('test_family_b', result.added_families) + self.assertNotIn('test_family_a', result.added_families) + + def test_removed_families(self): + from_dump = MetricsDump.from_lines(""" +test_family_a {hello="world"} 0 0 +test_family_a {hello="universe"} 0 0 + +test_family_b {} 0 0 + """) + + to_dump = MetricsDump.from_lines(""" +test_family_a {hello="world"} 0 0 + """) + + result = from_dump.diff(to_dump) + + self.assertIn('test_family_b', result.removed_families) + self.assertNotIn('test_family_a', result.removed_families) \ No newline at end of file diff --git a/test/lib/experiment.py b/test/lib/experiment.py new file mode 100644 index 0000000..b61b069 --- /dev/null +++ b/test/lib/experiment.py @@ -0,0 +1,79 @@ +import contextlib +import http.server +import logging +import random +import socketserver +import tempfile +import threading +import time +import typing +import unittest +from collections import defaultdict +from datetime import datetime +from enum import Enum, auto +from functools import partial +from pathlib import Path +from typing import Dict + +from frozendict import frozendict + +from lib.net import SocketAddress +from lib.prometheus import PrometheusInstance, RemotePrometheusArchive + + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(f'{__name__}') + + +ENDPOINT_ADDRESS = SocketAddress('localhost', 9500) + + +class TestMetricsHTTPHandler(http.server.BaseHTTPRequestHandler): + """A test HTTP endpoint 
for Prometheus to scrape.""" + + + def do_GET(self): + if self.path != '/metrics': + self.send_error(404) + + self.send_response(200) + self.end_headers() + + self.wfile.write(b""" +# TYPE test_counter counter +test_counter {abc="123"} 0 +test_counter {abc="456"} 0 + +test_untyped {abc="123"} 0 +test_untyped {abc="456"} 0 +""") + + +cm = contextlib.ExitStack() + +work_dir = Path(cm.enter_context(tempfile.TemporaryDirectory())) + +archive = RemotePrometheusArchive.for_tag('latest').download() +prometheus: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir)) + +prometheus.start() + + + +httpd = http.server.HTTPServer(ENDPOINT_ADDRESS, TestMetricsHTTPHandler) +thread = threading.Thread(target=httpd.serve_forever, daemon=True) + +prometheus.set_static_scrape_config('test', [ENDPOINT_ADDRESS]) + +thread.start() + +input('Press any key...') + + +httpd.shutdown() +thread.join() + + +cm.close() + + diff --git a/test/utils/jar_utils.py b/test/lib/jar_utils.py similarity index 78% rename from test/utils/jar_utils.py rename to test/lib/jar_utils.py index d3a02f9..a70440c 100644 --- a/test/utils/jar_utils.py +++ b/test/lib/jar_utils.py @@ -1,4 +1,3 @@ -import argparse from dataclasses import dataclass import logging import re @@ -10,8 +9,10 @@ from pathlib import Path from xml.etree import ElementTree -from utils.net import SocketAddress -from utils.path_utils import existing_file_arg +import click + +from lib.net import SocketAddress +from lib.path_utils import existing_file_arg @dataclass class ExporterJar: @@ -71,7 +72,7 @@ def start_standalone(self, listen_address: SocketAddress, self.logger.info('Standalone log file: %s', logfile_path) - logfile = logfile_path.open('w') + logfile = logfile_path.open('w') # TODO: cleanup command = ['java', '-jar', self.path, @@ -83,4 +84,23 @@ def start_standalone(self, listen_address: SocketAddress, self.logger.debug('Standalone exec(%s)', ' '.join(command)) - return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT) \ No newline at end of file + return subprocess.Popen(command, stdout=logfile, stderr=subprocess.STDOUT) + + +class ExporterJarParamType(click.ParamType): + name = "path" + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> ExporterJar: + if isinstance(value, ExporterJar): + return value + + try: + if isinstance(value, str): + for t in ExporterJar.ExporterType: + if t.name.lower() == value.lower(): + return ExporterJar.from_path(ExporterJar.default_jar_path(t)) + + return ExporterJar.from_path(value) + + except Exception as e: + self.fail(str(e), param, ctx) diff --git a/test/utils/net.py b/test/lib/net.py similarity index 66% rename from test/utils/net.py rename to test/lib/net.py index 44aec4d..0200c2c 100644 --- a/test/utils/net.py +++ b/test/lib/net.py @@ -7,7 +7,3 @@ class SocketAddress(typing.NamedTuple): def __str__(self) -> str: return f'{self.host}:{self.port}' - - -# def addr_str(address: (str, int)): -# return ':'.join(map(str, address)) \ No newline at end of file diff --git a/test/utils/path_utils.py b/test/lib/path_utils.py similarity index 78% rename from test/utils/path_utils.py rename to test/lib/path_utils.py index 32ba789..3133a35 100644 --- a/test/utils/path_utils.py +++ b/test/lib/path_utils.py @@ -1,4 +1,3 @@ -import argparse from os import PathLike from pathlib import Path @@ -6,10 +5,10 @@ def existing_file_arg(path: PathLike): path = Path(path) if not path.exists(): - raise ValueError(f'file "{path}" does not exist.') + raise 
ValueError(f'{path}: file does not exist.') if not path.is_file(): - raise ValueError(f'"{path}" is not a regular file.') + raise ValueError(f'{path}: not a regular file.') return path diff --git a/test/utils/prometheus.py b/test/lib/prometheus.py similarity index 83% rename from test/utils/prometheus.py rename to test/lib/prometheus.py index a7827e2..d0f15da 100644 --- a/test/utils/prometheus.py +++ b/test/lib/prometheus.py @@ -7,15 +7,20 @@ import subprocess import tarfile import time +import typing as t import urllib.request import urllib.error from contextlib import contextmanager from datetime import datetime, timedelta +from functools import wraps +from io import TextIOWrapper from pathlib import Path -from typing import List, NamedTuple, Optional, Union +from typing import List, NamedTuple, Optional, Union, TextIO from urllib.parse import urlparse import appdirs +import click +import cloup from cryptography import x509 from cryptography.x509.oid import NameOID from cryptography.hazmat.primitives import hashes @@ -26,7 +31,10 @@ import logging -from utils.net import SocketAddress +from lib.ccm import TestCluster +from lib.click_helpers import fixup_kwargs + +from lib.net import SocketAddress class _TqdmIOStream(object): @@ -165,7 +173,9 @@ class PrometheusInstance: listen_address: SocketAddress directory: Path = None + process: subprocess.Popen = None + log_file: TextIO tls_key_path: Path tls_cert_path: Path @@ -215,7 +225,6 @@ def setup_tls(self): ).not_valid_before( datetime.utcnow() ).not_valid_after( - # certificate will be valid for a day datetime.utcnow() + timedelta(days=1) ).add_extension( x509.SubjectAlternativeName([x509.DNSName(self.listen_address.host)]), @@ -242,8 +251,7 @@ def start(self, wait=True): yaml.safe_dump(config, f) - logfile_path = self.directory / 'prometheus.log' - logfile = logfile_path.open('w') + self.log_file = (self.directory / 'prometheus.log').open('w') self.logger.info('Starting Prometheus...') self.process = subprocess.Popen( @@ -251,7 +259,7 @@ def start(self, wait=True): f'--web.config.file={web_config_path}', f'--web.listen-address={self.listen_address}'], cwd=str(self.directory), - stdout=logfile, + stdout=self.log_file, stderr=subprocess.STDOUT ) @@ -332,3 +340,54 @@ def __exit__(self, exc_type, exc_val, exc_tb): if self.process is not None: self.process.__exit__(exc_type, exc_val, exc_tb) + + if self.log_file is not None: + self.log_file.close() + + +def with_prometheus(): + def decorator(func: t.Callable) -> t.Callable: + @cloup.option_group( + "Prometheus Archive", + cloup.option('--prometheus-version', metavar='TAG'), + cloup.option('--prometheus-archive', metavar='PATH/URL'), + constraint=cloup.constraints.mutually_exclusive + ) + @click.pass_context + @wraps(func) + def wrapper(ctx: click.Context, + prometheus_version: str, + prometheus_archive: str, + working_directory: Path, + ccm_cluster: t.Optional[TestCluster] = None, + **kwargs): + + if prometheus_version is None and prometheus_archive is None: + prometheus_version = 'latest' + + if prometheus_version is not None: + archive = RemotePrometheusArchive.for_tag(prometheus_version) + + else: + archive = archive_from_path_or_url(prometheus_archive) + + if isinstance(archive, RemotePrometheusArchive): + archive = archive.download() + + prometheus = ctx.with_resource(PrometheusInstance( + archive=archive, + working_directory=working_directory + )) + + if ccm_cluster: + prometheus.set_static_scrape_config('cassandra', + [str(n.exporter_address) for n in ccm_cluster.nodelist()] + ) + + 
fixup_kwargs() + + func(prometheus=prometheus, **kwargs) + + return wrapper + + return decorator diff --git a/test/utils/prometheus_tests.py b/test/lib/prometheus_tests.py similarity index 91% rename from test/utils/prometheus_tests.py rename to test/lib/prometheus_tests.py index 218095b..81a9eef 100644 --- a/test/utils/prometheus_tests.py +++ b/test/lib/prometheus_tests.py @@ -17,8 +17,8 @@ from frozendict import frozendict -from utils.net import SocketAddress -from utils.prometheus import PrometheusInstance, RemotePrometheusArchive +from lib.net import SocketAddress +from lib.prometheus import PrometheusInstance, RemotePrometheusArchive logging.basicConfig(level=logging.DEBUG) @@ -143,6 +143,10 @@ def run_test(mode: EndpointMode): class ConcurrentPrometheusInstancesTest(unittest.TestCase): def test_concurrent_instances(self): + """verify that trying to start a 2nd copy of prometheus fails. + prometheus + this is handled by creating a unique server tls cert for each instance and requiring a valid cert on connections. + if the api client connects to the wrong instance cert verification will fail and """ cm = contextlib.ExitStack() # TODO: clean this up work_dir1 = Path(cm.enter_context(tempfile.TemporaryDirectory())) # TODO: make these delete only if no exception occured @@ -158,6 +162,5 @@ def test_concurrent_instances(self): prometheus2.start() + cm.close() - -pass \ No newline at end of file diff --git a/test/utils/schema.py b/test/lib/schema.py similarity index 69% rename from test/utils/schema.py rename to test/lib/schema.py index af56689..6b3921c 100644 --- a/test/utils/schema.py +++ b/test/lib/schema.py @@ -2,11 +2,13 @@ from dataclasses import dataclass from os import PathLike import typing as t + +import click import yaml from pathlib import Path from collections import namedtuple -from utils.path_utils import existing_file_arg +from lib.path_utils import existing_file_arg @dataclass class CqlSchema: @@ -34,3 +36,16 @@ def default_schema_path() -> Path: test_dir = Path(__file__).parents[1] return test_dir / "schema.yaml" + +class CqlSchemaParamType(click.ParamType): + name = "path" + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> CqlSchema: + if isinstance(value, CqlSchema): + return value + + try: + return CqlSchema.from_path(value) + + except Exception as e: + self.fail(str(e), param, ctx) diff --git a/test/capture_dump.py b/test/old/capture_dump.py similarity index 97% rename from test/capture_dump.py rename to test/old/capture_dump.py index 8e0302d..1fa4707 100644 --- a/test/capture_dump.py +++ b/test/old/capture_dump.py @@ -8,9 +8,9 @@ import urllib.request from pathlib import Path -from utils.ccm import TestCluster -from utils.jar_utils import ExporterJar -from utils.schema import CqlSchema +from lib.ccm import TestCluster +from lib.jar_utils import ExporterJar +from lib.schema import CqlSchema logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) diff --git a/test/create_demo_cluster.py b/test/old/create_demo_cluster.py similarity index 90% rename from test/create_demo_cluster.py rename to test/old/create_demo_cluster.py index 575300b..505adee 100644 --- a/test/create_demo_cluster.py +++ b/test/old/create_demo_cluster.py @@ -16,11 +16,11 @@ import yaml from frozendict import frozendict -from utils.ccm import TestCluster -from utils.jar_utils import ExporterJar -from utils.path_utils import nonexistent_or_empty_directory_arg -from utils.prometheus import PrometheusInstance, PrometheusArchive 
-from utils.schema import CqlSchema +from lib.ccm import TestCluster +from lib.jar_utils import ExporterJar +from lib.path_utils import nonexistent_or_empty_directory_arg +from lib.prometheus import PrometheusInstance, RemotePrometheusArchive +from lib.schema import CqlSchema logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) @@ -44,7 +44,7 @@ ExporterJar.add_jar_argument('--exporter-jar', parser) CqlSchema.add_schema_argument('--schema', parser) - PrometheusArchive.add_archive_argument('--prometheus-archive', parser) + RemotePrometheusArchive.add_archive_argument('--prometheus-archive', parser) args = parser.parse_args() diff --git a/test/debug_agent.py b/test/old/debug_agent.py similarity index 98% rename from test/debug_agent.py rename to test/old/debug_agent.py index 6e8fe3e..ed51a36 100644 --- a/test/debug_agent.py +++ b/test/old/debug_agent.py @@ -8,7 +8,7 @@ from ccmlib.cluster import Cluster from ccmlib.cluster_factory import ClusterFactory -from utils.jar_utils import ExporterJar +from lib.jar_utils import ExporterJar def create_ccm_cluster(cluster_directory: Path, cassandra_version: str, node_count: int): diff --git a/test/e2e_test.py b/test/old/e2e_test.py similarity index 79% rename from test/e2e_test.py rename to test/old/e2e_test.py index dcb0a9e..4b11f06 100644 --- a/test/e2e_test.py +++ b/test/old/e2e_test.py @@ -19,34 +19,17 @@ from frozendict import frozendict -from utils.ccm import TestCluster -from utils.jar_utils import ExporterJar -from utils.path_utils import nonexistent_or_empty_directory_arg -from utils.prometheus import PrometheusInstance, PrometheusArchive -from utils.schema import CqlSchema +from lib.ccm import TestCluster +from lib.jar_utils import ExporterJar +from lib.path_utils import nonexistent_or_empty_directory_arg +from lib.prometheus import PrometheusInstance, RemotePrometheusArchive +from lib.schema import CqlSchema logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) -class TestMetricsHTTPHandler(http.server.BaseHTTPRequestHandler): - """A test HTTP endpoint for Prometheus to scrape.""" - def do_GET(self): - if self.path != '/metrics': - self.send_error(404) - - self.send_response(200) - self.end_headers() - - # if self.server.server_port == 9500: - if random.choice([True, False]): - self.wfile.write(b'# TYPE test_family gauge\n' - b'test_family 123\n') - - else: - self.wfile.write(b'# TYPE test_family gauge\n' - b'test_family123\n') if __name__ == '__main__': @@ -66,19 +49,18 @@ def do_GET(self): default=3) ExporterJar.add_jar_argument('--exporter-jar', parser) - CqlSchema.add_schema_argument('--schema', parser) - PrometheusArchive.add_archive_argument('--prometheus-archive', parser) + CqlSchema.add_schema_argument('--' + 'schema', parser) + RemotePrometheusArchive.add_archive_argument('--prometheus-archive', parser) args = parser.parse_args() if args.working_directory is None: args.working_directory = Path(tempfile.mkdtemp()) - def delete_working_dir(): shutil.rmtree(args.working_directory) - with contextlib.ExitStack() as defer: if not args.keep_working_directory: defer.callback(delete_working_dir) # LIFO order -- this gets called last @@ -104,8 +86,8 @@ def delete_working_dir(): # httpd = http.server.HTTPServer(("", 9501), DummyPrometheusHTTPHandler) # threading.Thread(target=httpd.serve_forever, daemon=True).start() - prometheus.set_scrape_config('cassandra', - list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) + prometheus.set_static_scrape_config('cassandra', + 
list(map(lambda n: f'localhost:{n.exporter_port}', ccm_cluster.nodelist()))) # prometheus.set_scrape_config('cassandra', ['localhost:9500', 'localhost:9501']) prometheus.start() diff --git a/test/e2e_test_tests.py b/test/old/e2e_test_tests.py similarity index 100% rename from test/e2e_test_tests.py rename to test/old/e2e_test_tests.py diff --git a/test/metric_dump_tool.py b/test/old/metric_dump_tool.py similarity index 100% rename from test/metric_dump_tool.py rename to test/old/metric_dump_tool.py diff --git a/test/metric_dump_tool_tests.py b/test/old/metric_dump_tool_tests.py similarity index 99% rename from test/metric_dump_tool_tests.py rename to test/old/metric_dump_tool_tests.py index 2994bbb..b3d1f88 100644 --- a/test/metric_dump_tool_tests.py +++ b/test/old/metric_dump_tool_tests.py @@ -1,4 +1,3 @@ -import pprint import unittest from metric_dump_tool import MetricsDump import metric_dump_tool diff --git a/test/test_tool.py b/test/test_tool.py index 86cf3d5..ee9de0e 100644 --- a/test/test_tool.py +++ b/test/test_tool.py @@ -1,263 +1,22 @@ -import argparse -import inspect import logging import os import sys -import tarfile import time import typing as t -from contextlib import contextmanager -import shutil -import tempfile -from functools import wraps, update_wrapper, WRAPPER_UPDATES from itertools import chain -from pathlib import Path -from tarfile import TarFile import pkg_resources -import click import cloup -from utils.ccm import TestCluster -from utils.jar_utils import ExporterJar -from utils.prometheus import PrometheusInstance, RemotePrometheusArchive, archive_from_path_or_url -from utils.schema import CqlSchema +from lib.ccm import TestCluster, with_ccm_cluster +from lib.click_helpers import with_working_directory +from lib.prometheus import PrometheusInstance, with_prometheus +from tools.dump import dump logger = logging.getLogger('test-tool') -def ppstrlist(sl: t.List[t.Any], conj: str = 'or', quote: bool = False): - joins = [', '] * len(sl) - joins += [f' {conj} ', ''] - - joins = joins[-len(sl):] - - if quote: - sl = [f'"{s}"' for s in sl] - - return ''.join(chain.from_iterable(zip(sl, joins))) - - - -def fixup_kwargs(*skip: str): - """ - inspect the caller's frame, grab any arguments and shove them back into kwargs - - this is useful when the caller is a wrapper and wants to pass on the majority its arguments to the wrapped function - """ - - caller_frame = inspect.stack()[1].frame - args, _, kwvar, values = inspect.getargvalues(caller_frame) - - args: t.List[str] = [a for a in args if a not in skip] - - kwargs: t.Dict[str, t.Any] = values[kwvar] - - for a in args: - v = values[a] - if isinstance(v, click.Context): - continue - - kwargs[a] = v - - -def with_working_directory(): - def decorator(func: t.Callable) -> t.Callable: - @click.option('-C', '--working-directory', type=click.Path(path_type=Path), - help="location to install Cassandra and/or Prometheus. Must be empty or not exist. 
Defaults to a temporary directory.") - @click.option('--keep-working-directory', is_flag=True, - help="don't delete the working directory on exit.") - @click.pass_context - @wraps(func) - def wrapper(ctx: click.Context, working_directory: Path, keep_working_directory: bool, **kwargs): - @contextmanager - def working_dir_ctx(): - nonlocal working_directory, keep_working_directory - - if working_directory is None: - working_directory = Path(tempfile.mkdtemp()) - - logger.info('Working directory is: %s', working_directory) - - try: - yield working_directory - finally: - if not keep_working_directory: - logger.debug('Deleting working directory') - shutil.rmtree(working_directory) - - working_directory = ctx.with_resource(working_dir_ctx()) - - fixup_kwargs() - - func(**kwargs) - - return wrapper - - return decorator - - -class ExporterJarParamType(click.ParamType): - name = "path" - - def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> ExporterJar: - if isinstance(value, ExporterJar): - return value - - try: - if isinstance(value, str): - for t in ExporterJar.ExporterType: - if t.name.lower() == value.lower(): - return ExporterJar.from_path(ExporterJar.default_jar_path(t)) - - - return ExporterJar.from_path(value) - - except Exception as e: - self.fail(str(e), param, ctx) - - -class CqlSchemaParamType(click.ParamType): - name = "path" - - def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> CqlSchema: - if isinstance(value, CqlSchema): - return value - - try: - return CqlSchema.from_path(value) - - except Exception as e: - self.fail(str(e), param, ctx) - - -def with_ccm_cluster(): - def decorator(func: t.Callable) -> t.Callable: - - jar_default_path = None - - # noinspection PyBroadException - try: - jar_default_path = ExporterJar.default_jar_path() - - except: - logger.warning('Failed to determine default cassandra-exporter jar path', exc_info=True) - - jar_types = [type.name.lower() for type in ExporterJar.ExporterType] - - @click.argument('cassandra_version') - @cloup.option_group( - "Cassandra", - cloup.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True), - cloup.option('--topology', 'cassandra_topology', - type=(int, int, int), default=(2, 3, 1), show_default=True, - metavar='DCS RACKS NODES', help="number of data centers, racks per data center, and nodes per rack."), - cloup.option('-j', '--exporter-jar', required=True, default=jar_default_path, show_default=True, type=ExporterJarParamType(), - help=f"path of the cassandra-exporter jar, either {ppstrlist(jar_types)} builds, or one of {ppstrlist(jar_types, quote=True)} for the default jar of that type."), - cloup.option('-s', '--schema', 'cql_schema', default=CqlSchema.default_schema_path(), show_default=True, type=CqlSchemaParamType(), - help='path of the CQL schema YAML file to apply on cluster start. 
The YAML file must contain a list of CQL statement strings.') - ) - @click.pass_context - @wraps(func) - def wrapper(ctx: click.Context, - cassandra_version: str, cassandra_cluster_name: str, cassandra_topology: t.Tuple[int, int, int], - exporter_jar: ExporterJar, - cql_schema: t.Optional[CqlSchema], - working_directory: Path, **kwargs): - - datacenters, racks, nodes, = cassandra_topology - - logger.info('Creating Cassandra %s cluster, with:') - logger.info(' Topology: %s data center(s), %s rack(s) per DC, %s node(s) per rack (%s node(s) total)', datacenters, racks, nodes, (nodes * racks * datacenters)) - logger.info(' cassandra-exporter: %s', exporter_jar) - - ccm_cluster = ctx.with_resource(TestCluster( - cluster_directory=(working_directory / cassandra_cluster_name), - cassandra_version=cassandra_version, - nodes=nodes*racks*datacenters, racks=racks, datacenters=datacenters, - exporter_jar=exporter_jar, - initial_schema=cql_schema - )) - - fixup_kwargs() - - func(ccm_cluster=ccm_cluster, **kwargs) - - - return wrapper - - return decorator - - -# class PrometheusArchiveParamType(click.ParamType): -# name = "tag/path/URL" -# -# def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> PrometheusArchive: -# if isinstance(value, PrometheusArchive): -# return value -# -# try: -# if isinstance(value, str): -# for t in ExporterJar.ExporterType: -# if t.name.lower() == value.lower(): -# return ExporterJar.from_path(ExporterJar.default_jar_path(t)) -# -# -# return ExporterJar.from_path(value) -# -# except Exception as e: -# self.fail(str(e), param, ctx) - - -def with_prometheus(): - def decorator(func: t.Callable) -> t.Callable: - @cloup.option_group( - "Prometheus Archive", - cloup.option('--prometheus-version', metavar='TAG'), - cloup.option('--prometheus-archive', metavar='PATH/URL'), - constraint=cloup.constraints.mutually_exclusive - ) - - @click.pass_context - @wraps(func) - def wrapper(ctx: click.Context, - prometheus_version: str, - prometheus_archive: str, - working_directory: Path, - ccm_cluster: t.Optional[TestCluster] = None, - **kwargs): - - if prometheus_version is None and prometheus_archive is None: - prometheus_version = 'latest' - - if prometheus_version is not None: - archive = RemotePrometheusArchive.for_tag(prometheus_version) - - else: - archive = archive_from_path_or_url(prometheus_archive) - - if isinstance(archive, RemotePrometheusArchive): - archive = archive.download() - - prometheus = ctx.with_resource(PrometheusInstance( - archive=archive, - working_directory=working_directory - )) - - if ccm_cluster: - prometheus.set_static_scrape_config('cassandra', - [str(n.exporter_address) for n in ccm_cluster.nodelist()] - ) - - fixup_kwargs() - - func(prometheus=prometheus, **kwargs) - - return wrapper - - - return decorator @cloup.group() @@ -265,13 +24,12 @@ def cli(): pass - @cli.command('demo') @with_working_directory() @with_ccm_cluster() def run_demo_cluster(ccm_cluster: TestCluster, **kwargs): """ - Start C* with the exporter jar (agent or standalone). + Start a Cassandra cluster with cassandra-exporter installed (agent or standalone). Optionally setup a schema. Wait for ctrl-c to shut everything down. 
""" @@ -286,47 +44,6 @@ def run_demo_cluster(ccm_cluster: TestCluster, **kwargs): input("Press any key to stop cluster...") -@cli.group('dump') -def dump(): - pass - - -@dump.command('capture') -@with_working_directory() -@with_ccm_cluster() -@click.argument('filename') -def dump_capture(ccm_cluster: TestCluster, filename: str, **kwargs): - """Capture metrics from cassandra-exporter and save them to disk.""" - - logger.info('Capturing metrics dump.') - - # with tarfile.open(filename, 'w') as tf: - # tf. - - - - # for node in ccm_cluster.nodelist(): - # url = f'http://{node.network_interfaces["exporter"]}/metrics?x-accept=text/plain' - # destination = args.output_directory / f'{node.name}.txt' - # urllib.request.urlretrieve(url, destination) - # - # logger.info(f'Wrote {url} to {destination}') - - -@dump.command('validate') -def dump_validate(): - pass - - -def dump_compare(): - pass - - - -# capture dump (start C* with exporter, fetch and write metrics to file) - # this is every similar to the demo cmd -# validate dump (check for syntax errors, etc) -# compare/diff dump (list metrics added & removed) @cli.command() @@ -382,10 +99,23 @@ def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance, **kwargs): # sys.exit(-1) -# def timing(): -# + +@cli.command('benchmark') +@with_working_directory() +@with_ccm_cluster() +def benchmark(ccm_cluster: TestCluster, **kwargs): + """""" + pass + + + +cli.add_command(dump) + def main(): + os.environ['CCM_JAVA8_DEBUG'] = 'please' + logging.basicConfig(level=logging.DEBUG) + # load ccm extensions (useful for ccm-java8, for example) for entry_point in pkg_resources.iter_entry_points(group='ccm_extension'): entry_point.load()() @@ -394,7 +124,4 @@ def main(): if __name__ == '__main__': - os.environ['CCM_JAVA8_DEBUG'] = 'please' - logging.basicConfig(level=logging.DEBUG) - #logger.info("Hello!") main() \ No newline at end of file diff --git a/test/tools/__pycache__/dump.cpython-39.pyc b/test/tools/__pycache__/dump.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6cdd1c87c8a45c37131bc603a6776d59bd0a9c0 GIT binary patch literal 3037 zcmZuzOK%&=5$>Mpd5|J0ik9Dp5J5IsCKhSS8ze9sXC2v_*IH|=?JRa+V=$R+Nka{1 z#@$0pB1R|ax(JW}xvV!Jk9)}PS)i{u<(@+jY#vojQhsHKE>2g!s;j>GYNBSdPT=|e zH-C)(x+HZbV=QpYzZ-b)Z>RpK7Su-d zpl;@|bY;{C8jCf}plRl|f|elDRqo7a&=#%f8h2-ud)z%GCp74AKR72k({)~(QPDUd zq7JP&J}7J=lJ@3b#U3tJHMEB;cOnRx_H*Zb(i4O%lyjn)ZhxIk4W$8-_dv5J*wHm zsNCyWdgZZDH!KGS5@Z z_;^&rxby1|#Di=>Qw)ST-I{!`m5)YI##Oh>yKz*MV=1n6Uqorbqf&HXq|1|?o$g4K zGKp0eH&)wU6NzDdCH{45@I(oz1`&@&gNHI7f>CM^M@qpKGTIP_MK0k+12l4gMydhs z)h~{u3tP3(E(-*)CLJ8u0IQN6nA|^#Myd7|w&@iPx+IIrB+t-WulYtIFk`a`ZUhf1 zd}zOgrv3?{B2zLWl$5lB*dK!&TAXt03ko`W%D7$88DO-crE`@`UCw6o55GFLxdSV` z8R0JA%s<6_{293icuOd+L1gQE1>PDj?PFHfruB+dHtzEo;Y~&uIk73p+Bb+gy$+}2 z?{jF&U7kHXc6jR^dG_g3>)1W^_-f@0SH2{?UAbS9uh_9)QP4C%vsQVav9FTjTG<@7 zrmK}-)$k-^eW&ta&bg|Ft7dSg=YM(rXWj&ytxrCGR7SGwM%}GtP`V3DU0VkJEXq4F zAA$1i;2Aim=!NK(dt!u$;fdPsKL$|x;xJKVnD0-%vt>{O*gV?~U{0MDnfN+`hEu-f5?}^EVaDmMJ?KF?0RNd*nS_WHtFCZ-dtAStUMV{unNAePG z?v1nkEI-J4_Pk#^b2s{Oo-Ervt7lE>r8q1HMH(fUw#&m3RPCrJ(j+!XFc_*l(+uUr z+bhfB)?hH{3>FFd^TUlOjzv-aY*O1;==u})GnqqnPZ&m}*Vc|gY)JIHc1HWaDcS{7 zcfp4&$#!zxx%cG3!|=h~?O%U%|Iy=c`|gALn&pMa=EnON#S{RbnaS3gigrYZVb|^D zWsI+dQnG`mxMP{7$?vr%#q%*_v|a&)M3NCNM6WKPvJjKc+TA~h&%N=JpzHn>RJumzL|aB7-IJS>1u-wtU5ZCXa`BAlVFfgql5`Ltm*thQxYuH`fL zTlQb;KWqi$Kist+S`FHu9lCC{EO`UwzeBAKC_jXzehNa6fN~N5u>`hYLkF5GyrF-Y zEZ%D7&EeKdARNw4Sy1N=pxD)G_rN|Z1P>z_jUL0*3_Ajf9pL1#&`yB^+K$UZ?Sj9? 
zGK-PUmk)9&HpBCu!b^!>L zF<=s49gx;ntg?X5fWs0&`=1n8fTC18F(l%?u9<)dWF2oS2lhEE<()TeHCCg+aub9J5Jt`iQ;kOZ z%d(4K0|@ILHPL^E9g!_fNqeo3-~8QK&3==2$Ke0nE|g?Ql|3<5KTzEQI#`0YjVF7S zIYlDJz3tkj$(N;FPhiHpNHh5xc!%!{_%_fEPP#Gw+x*&+ALB+3K)g|lkzD@Y7syTc z0zo8n%Y*z4%atiN`M4~`B^Dg>AtC@Wv6ve!xACC2z)&+5qg_v7bZ)I9KY{n&RqY|E zfjhK^G;j85JTR+!HBe ztCC0s@;(j%H1ctwYrCTS*pR908uhLO_1xzM`+#5b4W@k{y735e0!e0pDMJE?7~^tt ylh^Ki1}g2}o&)y|I$7ZYq%?%TZ?)%dRvr3vhN*!S%%_*$?{s|Hq8$fii~bLfjv}D| literal 0 HcmV?d00001 diff --git a/test/tools/dump.py b/test/tools/dump.py new file mode 100644 index 0000000..eb5c318 --- /dev/null +++ b/test/tools/dump.py @@ -0,0 +1,105 @@ +import logging +from pathlib import Path + +import yaml +import urllib.request +import typing as t + +from lib.ccm import TestCluster, with_ccm_cluster +from lib.click_helpers import with_working_directory + +import click +import cloup + +logger = logging.getLogger('dump') + +@cloup.group('dump') +def dump(): + """Commands to capture, validate and diff metrics dumps""" + + +DUMP_MANIFEST_NAME = 'dump-manifest.yaml' + + +@dump.command('capture') +@with_working_directory() +@with_ccm_cluster() +@click.argument('destination') +def dump_capture(ccm_cluster: TestCluster, destination: Path, **kwargs): + """Start a Cassandra cluster, capture metrics from each node's cassandra-exporter and save them to disk.""" + + ccm_cluster.start() + + destination = Path(destination) + destination.mkdir(exist_ok=True) + + logger.info('Capturing metrics dump to %s...', destination) + + with (destination / DUMP_MANIFEST_NAME).open('w') as f: + manifest = { + 'version': '20221207', + 'cassandra': { + 'version': ccm_cluster.version(), + 'topology': { + 'nodes': {n.name: { + 'rack': n.rack, + 'datacenter': n.data_center, + 'ip': n.ip_addr + } for n in ccm_cluster.nodelist()} + } + }, + 'exporter': { + 'version': 'unknown' + } + } + + yaml.safe_dump(manifest, f) + + for node in ccm_cluster.nodelist(): + for mimetype, ext in (('text/plain', 'txt'), ('application/json', 'json')): + url = f'http://{node.exporter_address}/metrics?x-accept={mimetype}' + download_path = destination / f'{node.name}-metrics.{ext}' + + urllib.request.urlretrieve(url, download_path) + + logger.info(f'Wrote {url} to {download_path}') + + +class DumpPathParamType(click.ParamType): + name = 'dump' + + def convert(self, value: t.Any, param: t.Optional[click.Parameter], ctx: t.Optional[click.Context]) -> t.Any: + if isinstance(value, Path): + return value + + p = Path(value) + if p.is_file(): + p = p.parent + + manifest = p / DUMP_MANIFEST_NAME + if not manifest.exists(): + self.fail(f'{p}: not a valid dump: {manifest} does not exist.', param, ctx) + + return p + + +@dump.command('validate') +@click.argument('dump', type=DumpPathParamType()) +def dump_validate(dump: Path, **kwargs): + """Validate a metrics dump using Prometheus's promtool""" + pass + + +@dump.command('diff') +@click.argument('dump1', type=DumpPathParamType()) +@click.argument('dump2', type=DumpPathParamType()) +def dump_diff(dump1: Path, dump2: Path): + """Compare two metrics dumps and output the difference""" + pass + + + +# capture dump (start C* with exporter, fetch and write metrics to file) +# this is every similar to the demo cmd +# validate dump (check for syntax errors, etc) +# compare/diff dump (list metrics added & removed) diff --git a/test/validate_metrics.py b/test/validate_metrics.py deleted file mode 100644 index 0748735..0000000 --- a/test/validate_metrics.py +++ /dev/null @@ -1 +0,0 @@ -# spin up compare current exporter build dump against 
previous known \ No newline at end of file From 525a4c36a9b238e7d2ecc8d22b0b054845b4a3b9 Mon Sep 17 00:00:00 2001 From: johndelcastillo Date: Thu, 2 Feb 2023 16:09:37 +1100 Subject: [PATCH 12/19] figuring stuff out --- .gitignore | 4 +++- test/pyproject.toml | 10 +++------- test/setup.py | 8 +++++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index 2ce613a..319d825 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,6 @@ dependency-reduced-pom.xml release.properties *.srl - +test/build +*.egg-info +__pycache__ \ No newline at end of file diff --git a/test/pyproject.toml b/test/pyproject.toml index eec228f..fa0eeec 100644 --- a/test/pyproject.toml +++ b/test/pyproject.toml @@ -2,10 +2,6 @@ requires = ["setuptools", "setuptools-scm"] [project] -dependencies = [ - 'click', - 'cloup', - 'ccm', - 'appdirs', - 'cryptography' -] \ No newline at end of file +name = "cassandra-exporter" +requires-python = ">=3.8" +dynamic = ["version", "description", "authors", "dependencies"] \ No newline at end of file diff --git a/test/setup.py b/test/setup.py index 32f4013..9a4a66c 100644 --- a/test/setup.py +++ b/test/setup.py @@ -6,6 +6,8 @@ description='End-to-end testing tools for cassandra-exporter', author='Adam Zegelin', author_email='adam@instaclustr.com', - packages=['cassandra-exporter-e2e-tests'], - install_requires=['ccm', 'prometheus_client', 'cassandra-driver', 'frozendict', 'pyyaml', 'tqdm'], -) \ No newline at end of file + packages=['lib', 'tools'], + install_requires=['ccm', 'prometheus_client', + 'cassandra-driver', 'frozendict', 'pyyaml', 'tqdm', 'click', + 'cloup', 'appdirs', 'cryptography'], +) From c0e9c9db9dfab0f168ca8163debd266b8e4059e2 Mon Sep 17 00:00:00 2001 From: John Del Castillo Date: Fri, 3 Feb 2023 14:08:36 +1100 Subject: [PATCH 13/19] click tests working --- test/test_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_tool.py b/test/test_tool.py index ee9de0e..bbb24ab 100644 --- a/test/test_tool.py +++ b/test/test_tool.py @@ -36,7 +36,7 @@ def run_demo_cluster(ccm_cluster: TestCluster, **kwargs): ccm_cluster.start() for node in ccm_cluster.nodelist(): - logger.info('Node %s cassandra-exporter running on http://%s', node.name, node.network_interfaces['exporter']) + logger.info('Node %s cassandra-exporter running on http://%s', node.name, node.exporter_address) sys.stderr.flush() sys.stdout.flush() From 8886b88f3af463c9f816fa4e3a55f432dd1f9bb5 Mon Sep 17 00:00:00 2001 From: John Del Castillo Date: Tue, 7 Feb 2023 07:49:23 +1100 Subject: [PATCH 14/19] 4.0 support work --- .vscode/settings.json | 4 +++ agent/pom.xml | 4 +-- .../exporter/InternalMetadataFactory.java | 28 +++++++++++-------- ...nalGossiperMBeanMetricFamilyCollector.java | 12 ++++---- common/pom.xml | 4 +-- pom.xml | 6 ++-- standalone/pom.xml | 4 +-- test/lib/ccm.py | 3 +- test/schema.yaml | 9 ------ test/test_tool.py | 5 ++++ 10 files changed, 42 insertions(+), 37 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..0153b31 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "java.configuration.updateBuildConfiguration": "interactive", + "java.compile.nullAnalysis.mode": "disabled" +} \ No newline at end of file diff --git a/agent/pom.xml b/agent/pom.xml index 73f3384..9092467 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -5,11 +5,11 @@ com.zegelin.cassandra-exporter exporter-parent - 0.9.11-SNAPSHOT + 0.9.12-SNAPSHOT agent - 
0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
 
     Cassandra Exporter Agent
 
diff --git a/agent/src/main/java/com/zegelin/cassandra/exporter/InternalMetadataFactory.java b/agent/src/main/java/com/zegelin/cassandra/exporter/InternalMetadataFactory.java
index 7eb07f2..ceb4094 100644
--- a/agent/src/main/java/com/zegelin/cassandra/exporter/InternalMetadataFactory.java
+++ b/agent/src/main/java/com/zegelin/cassandra/exporter/InternalMetadataFactory.java
@@ -1,29 +1,33 @@
 package com.zegelin.cassandra.exporter;
 
 import com.zegelin.cassandra.exporter.MetadataFactory;
-import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.schema.Schema;
 
 import java.net.InetAddress;
 import java.util.Optional;
 import java.util.Set;
 
 public class InternalMetadataFactory extends MetadataFactory {
-    private static Optional<CFMetaData> getCFMetaData(final String keyspaceName, final String tableName) {
-        return Optional.ofNullable(Schema.instance.getCFMetaData(keyspaceName, tableName));
+    private static Optional<TableMetadata> getTableMetaData(final String keyspaceName, final String tableName) {
+        return Optional.ofNullable(Schema.instance.getTableMetadata(keyspaceName, tableName));
+    }
+
+    private static Optional<TableMetadataRef> getIndexMetadata(final String keyspaceName, final String indexName) {
+        return Optional.ofNullable(Schema.instance.getIndexTableMetadataRef(keyspaceName, indexName));
     }
 
     @Override
     public Optional<IndexMetadata> indexMetadata(final String keyspaceName, final String tableName, final String indexName) {
-        return getCFMetaData(keyspaceName, tableName)
-                .flatMap(m -> m.getIndexes().get(indexName))
+        return getIndexMetadata(keyspaceName, indexName)
+                .flatMap(m -> m.get().indexName())
                 .map(m -> {
-                    final IndexMetadata.IndexType indexType = IndexMetadata.IndexType.valueOf(m.kind.toString());
-                    final Optional<String> className = Optional.ofNullable(m.options.get("class_name"));
+                    final IndexMetadata.IndexType indexType = IndexMetadata.IndexType.valueOf(m);
+                    final Optional<String> className = Optional.ofNullable(m);
 
                     return new IndexMetadata() {
                         @Override
@@ -41,7 +45,7 @@ public Optional<String> customClassName() {
 
     @Override
     public Optional<TableMetadata> tableOrViewMetadata(final String keyspaceName, final String tableOrViewName) {
-        return getCFMetaData(keyspaceName, tableOrViewName)
+        return getTableMetaData(keyspaceName, tableOrViewName)
                 .map(m -> new TableMetadata() {
                     @Override
                     public String compactionStrategyClassName() {
@@ -67,12 +71,12 @@ public Optional<EndpointMetadata> endpointMetadata(final InetAddress endpoint) {
         return Optional.of(new EndpointMetadata() {
             @Override
             public String dataCenter() {
-                return endpointSnitch.getDatacenter(endpoint);
+                return endpointSnitch.getDatacenter(InetAddressAndPort.getByAddress(endpoint));
             }
 
             @Override
             public String rack() {
-                return endpointSnitch.getRack(endpoint);
+                return endpointSnitch.getRack(InetAddressAndPort.getByAddress(endpoint));
             }
         });
     }
@@ -84,6 +88,6 @@ public String clusterName() {
 
     @Override
     public InetAddress localBroadcastAddress() {
-        return FBUtilities.getBroadcastAddress();
+        return FBUtilities.getBroadcastAddressAndPort().getAddress();
     }
 }
diff --git a/agent/src/main/java/com/zegelin/cassandra/exporter/collector/InternalGossiperMBeanMetricFamilyCollector.java b/agent/src/main/java/com/zegelin/cassandra/exporter/collector/InternalGossiperMBeanMetricFamilyCollector.java
index 0ccbb84..9b378a6 100644
--- a/agent/src/main/java/com/zegelin/cassandra/exporter/collector/InternalGossiperMBeanMetricFamilyCollector.java
+++ b/agent/src/main/java/com/zegelin/cassandra/exporter/collector/InternalGossiperMBeanMetricFamilyCollector.java
@@ -1,10 +1,12 @@
 package com.zegelin.cassandra.exporter.collector;
 
+import com.google.common.collect.ImmutableSet;
 import com.zegelin.cassandra.exporter.MetadataFactory;
 import com.zegelin.prometheus.domain.Labels;
 import com.zegelin.prometheus.domain.NumericMetric;
 import org.apache.cassandra.gms.EndpointState;
 import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.InetAddressAndPort;
 
 import java.net.InetAddress;
 import java.util.Map;
@@ -34,13 +36,11 @@ private InternalGossiperMBeanMetricFamilyCollector(final Gossiper gossiper, fina
 
     @Override
     protected void collect(final Stream.Builder<NumericMetric> generationNumberMetrics, final Stream.Builder<NumericMetric> downtimeMetrics, final Stream.Builder<NumericMetric> activeMetrics) {
-        final Set<Map.Entry<InetAddress, EndpointState>> endpointStates = gossiper.getEndpointStates();
+        for (InetAddressAndPort endpoint : gossiper.getEndpoints()) {
+            final InetAddress endpointAddress = endpoint.getAddress();
+            final EndpointState state = gossiper.getEndpointStateForEndpoint(endpoint);
 
-        for (final Map.Entry<InetAddress, EndpointState> endpointState : endpointStates) {
-            final InetAddress endpoint = endpointState.getKey();
-            final EndpointState state = endpointState.getValue();
-
-            final Labels labels = metadataFactory.endpointLabels(endpoint);
+            final Labels labels = metadataFactory.endpointLabels(endpointAddress);
 
             generationNumberMetrics.add(new NumericMetric(labels, gossiper.getCurrentGenerationNumber(endpoint)));
             downtimeMetrics.add(new NumericMetric(labels, millisecondsToSeconds(gossiper.getEndpointDowntime(endpoint))));
diff --git a/common/pom.xml b/common/pom.xml
index 2267c33..152b36f 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -5,11 +5,11 @@
     com.zegelin.cassandra-exporter
     exporter-parent
-    0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
 
     common
-    0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
 
     Cassandra Exporter Common
diff --git a/pom.xml b/pom.xml
index 28fdf30..584aaa0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
     com.zegelin.cassandra-exporter
     exporter-parent
-    0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
     pom
 
     Cassandra Exporter Parent
@@ -15,7 +15,7 @@
-        3.11.2
+        4.1.0
         2.5.3
         3.1.1
@@ -41,7 +41,7 @@
         com.zegelin.cassandra-exporter
         common
-        0.9.11-SNAPSHOT
+        0.9.12-SNAPSHOT
 
         org.glassfish.hk2.external
diff --git a/standalone/pom.xml b/standalone/pom.xml
index d539c16..a8d8d2d 100644
--- a/standalone/pom.xml
+++ b/standalone/pom.xml
@@ -5,11 +5,11 @@
     com.zegelin.cassandra-exporter
     exporter-parent
-    0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
 
     standalone
-    0.9.11-SNAPSHOT
+    0.9.12-SNAPSHOT
 
     Cassandra Exporter Standalone/CLI
diff --git a/test/lib/ccm.py b/test/lib/ccm.py
index 1d41a03..aaabcc8 100644
--- a/test/lib/ccm.py
+++ b/test/lib/ccm.py
@@ -153,7 +153,7 @@ def decorator(func: t.Callable) -> t.Callable:
             "Cassandra",
             cloup.option('--cluster-name', 'cassandra_cluster_name', default='test-cluster', show_default=True,
                          help='name of the Cassandra cluster'),
-            cloup.option('--cassandra-version', default='3.11.14', show_default=True,
+            cloup.option('--cassandra-version', default='4.1.0', show_default=True,
                          help='Cassandra version to run'),
             cloup.option('--topology', 'cassandra_topology',
                          type=(int, int, int), default=(2, 3, 1), show_default=True,
@@ -176,6 +176,7 @@ def wrapper(ctx: click.Context,
datacenters, racks, nodes, = cassandra_topology
 
         logger.info('Creating Cassandra %s cluster, with:', cassandra_version)
+        logger.info('    CCM working directory: %s', working_directory)
         logger.info('    Topology: %s data center(s), %s rack(s) per DC, %s node(s) per rack (%s node(s) total)', datacenters, racks, nodes, (nodes * racks * datacenters))
         logger.info('    cassandra-exporter: %s', exporter_jar)
diff --git a/test/schema.yaml b/test/schema.yaml
index 24b20f7..046f1ed 100644
--- a/test/schema.yaml
+++ b/test/schema.yaml
@@ -19,13 +19,4 @@
         PRIMARY KEY ((family, labels, bucket), time)
     )
 
-- >
-  CREATE MATERIALIZED VIEW example.numeric_metric_labels AS
-  SELECT family, labels, bucket, time
-  FROM example.numeric_metrics
-  WHERE family IS NOT NULL AND
-        labels IS NOT NULL AND
-        bucket IS NOT NULL AND
-        time IS NOT NULL
-  PRIMARY KEY (family, labels, bucket, time)
diff --git a/test/test_tool.py b/test/test_tool.py
index bbb24ab..7e0684b 100644
--- a/test/test_tool.py
+++ b/test/test_tool.py
@@ -65,6 +65,11 @@ def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance, **kwargs):
 
     prometheus.start()
 
+    for node in ccm_cluster.nodelist():
+        logger.info('Node %s cassandra-exporter running on http://%s', node.name, node.exporter_address)
+
+    logger.info("Prometheus running on: http://%s", prometheus.listen_address)
+
     input("Press any key to stop cluster...")
 
     while True:

From 1c5685c83bc1d843898b37a3f4b3b4c638e3874a Mon Sep 17 00:00:00 2001
From: johndelcastillo
Date: Tue, 7 Feb 2023 13:49:24 +1100
Subject: [PATCH 15/19] fix

---
 test/test_tool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_tool.py b/test/test_tool.py
index 7e0684b..198d2ae 100644
--- a/test/test_tool.py
+++ b/test/test_tool.py
@@ -68,7 +68,7 @@ def e2e(ccm_cluster: TestCluster, prometheus: PrometheusInstance, **kwargs):
     for node in ccm_cluster.nodelist():
         logger.info('Node %s cassandra-exporter running on http://%s', node.name, node.exporter_address)
 
-    logger.info("Prometheus running on: http://%s", prometheus.listen_address)
+    logger.info("Prometheus running on: https://%s", prometheus.listen_address)
 
     input("Press any key to stop cluster...")

From bd8f7c6cb237b7d32c9655f8017908b688d7b1f5 Mon Sep 17 00:00:00 2001
From: John Del Castillo
Date: Wed, 8 Feb 2023 12:06:59 +1100
Subject: [PATCH 16/19] Updated readme, added test doco

---
 README.md | 142 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 138 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index c14e86f..72ff06e 100644
--- a/README.md
+++ b/README.md
@@ -33,13 +33,17 @@ For example, the following PromQL query will return an estimate of the number of
 
 ## Compatibility
 
-*cassandra-exporter* is has been tested with:
+*cassandra-exporter* is now Cassandra 4.0+ compatible, but the change is not backwards compatible. Support for older Cassandra versions is available via the older releases, as follows:
 
-| Component | Version |
+| Cassandra Version | Compatible Exporter Version |
 |-----------------|---------------|
-| Apache Cassandra| 3.0.17 (experimental), 3.11.2, 3.11.3 |
-| Prometheus | 2.0 and later |
+| Apache Cassandra 4.x | 0.9.12 |
+| Apache Cassandra 3.0.17, 3.11.2, 3.11.3 | 0.9.11 |
+
+| Prometheus Version |
+|-----------------|
+| 2.42.0 |
+
+Other Cassandra and Prometheus versions will be tested for compatibility in the future.
 
 ## Usage
 
@@ -407,6 +411,136 @@ We suggest viewing the metrics endpoint (e.g., <http://localhost:9500/metrics>) to see which metrics are exported by your Cassandra node.
 
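+
+As a quick sanity check, a minimal Python sketch along the following lines fetches the metrics programmatically (it assumes a local node with the exporter listening on the default `localhost:9500`; the `x-accept` query parameter selects the exposition format, in the same way the test harness's `dump capture` tool uses it):
+
+    import urllib.request
+
+    # request the plain-text exposition format; 'application/json' selects JSON instead
+    url = 'http://localhost:9500/metrics?x-accept=text/plain'
+
+    with urllib.request.urlopen(url) as response:
+        print(response.read().decode('utf-8'))
+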
From bd8f7c6cb237b7d32c9655f8017908b688d7b1f5 Mon Sep 17 00:00:00 2001
From: John Del Castillo
Date: Wed, 8 Feb 2023 12:06:59 +1100
Subject: [PATCH 16/19] Updated readme, added test doco

---
 README.md | 142 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 138 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index c14e86f..72ff06e 100644
--- a/README.md
+++ b/README.md
@@ -33,13 +33,17 @@ For example, the following PromQL query will return an estimate of the number of
 
 ## Compatibility
 
-*cassandra-exporter* is has been tested with:
+*cassandra-exporter* is now compatible with Cassandra 4.0+, but the change is not backwards compatible. Support for older Cassandra versions is provided by the older releases, as follows:
 
-| Component        | Version                               |
-|------------------|---------------------------------------|
-| Apache Cassandra | 3.0.17 (experimental), 3.11.2, 3.11.3 |
-| Prometheus       | 2.0 and later                         |
+| Cassandra Version                       | Compatible Exporter Version |
+|-----------------------------------------|-----------------------------|
+| Apache Cassandra 4.x                    | 0.9.12                      |
+| Apache Cassandra 3.0.17, 3.11.2, 3.11.3 | 0.9.11                      |
+
+| Prometheus Version |
+|--------------------|
+| 2.42.0             |
+
+Other Cassandra and Prometheus versions will be tested for compatibility in the future.
 
 ## Usage
@@ -407,6 +411,136 @@
 We suggest viewing the metrics endpoint (e.g., <http://localhost:9500/metrics>) to see what metrics are exported by your Cassandra node.
 
+## Testing
+
+### Java
+There are unit tests in the various projects which will get executed with the maven commands.
+
+### Integration test harness
+
+There is an integration test harness available in the */test/* folder.
+
+#### Requirements
+
+The test harness uses Python (tested with 3.10).
+
+Initialise the project using its pyproject.toml file:
+
+    pip install .
+
+The tool can then be launched via:
+
+    python test_tool.py
+
+#### Operation
+
+There are four modes of operation (an example invocation follows this list):
+
+- `benchmark`
+
+  Not implemented yet (TBA); intended to test the speed of collection.
+
+- `demo`
+
+      Usage: test_tool.py demo [OPTIONS]
+
+      Start a Cassandra cluster with cassandra-exporter installed (agent or
+      standalone). Optionally set up a schema. Wait for ctrl-c to shut everything
+      down.
+
+      Working Directory:
+        -C, --working-directory PATH    location to install Cassandra and/or
+                                        Prometheus. Must be empty or not exist.
+                                        Defaults to a temporary directory.
+        --cleanup-working-directory [on-error|always|never]
+                                        how to delete the working directory on exit:
+                                        "on-error": delete working directory on exit
+                                        unless an error occurs, "always": always
+                                        delete working directory on exit, "never":
+                                        never delete working directory.
+                                        [default: on-error]
+
+      Cassandra:
+        --cluster-name TEXT             name of the Cassandra cluster
+                                        [default: test-cluster]
+        --cassandra-version TEXT        Cassandra version to run  [default: 4.1.0]
+        --topology DCS RACKS NODES      number of data centers, racks per data
+                                        center, and nodes per rack.
+                                        [default: 2, 3, 1]
+        -j, --exporter-jar PATH         path of the cassandra-exporter jar to use,
+                                        either agent or standalone builds, or one of
+                                        "agent" or "standalone" for the currently
+                                        built jar of that type in the project
+                                        directory (assumes that the sources for this
+                                        test tool are in the standard location within
+                                        the project, and that the jar(s) have been
+                                        built).  [default: agent]
+        -s, --schema PATH               path of the CQL schema YAML file to apply on
+                                        cluster start. The YAML file must contain a
+                                        list of CQL statement strings, which are
+                                        applied in order.  [default:
+                                        /root/source/forks/cassandra-exporter/test/schema.yaml]
+
+- `dump`
+
+      Usage: test_tool.py dump [OPTIONS] COMMAND [ARGS]...
+
+      Commands to capture, validate and diff metrics dumps
+
+      Options:
+        --help  Show this message and exit.
+
+      Commands:
+        capture   Start a Cassandra cluster, capture metrics from each node's...
+        diff      Compare two metrics dumps and output the difference
+        validate  Validate a metrics dump using Prometheus's promtool.
+
+- `e2e` - *Note: no tests are run at the moment*
+
+      Usage: test_tool.py e2e [OPTIONS]
+
+      Run cassandra-exporter end-to-end tests.
+
+      - Start C* with the exporter JAR (agent or standalone).
+      - Set up a schema.
+      - Configure and start Prometheus.
+      - Wait for all scrape targets to get healthy.
+      - Run some tests.
+
+      Working Directory:
+        -C, --working-directory PATH    location to install Cassandra and/or
+                                        Prometheus. Must be empty or not exist.
+                                        Defaults to a temporary directory.
+        --cleanup-working-directory [on-error|always|never]
+                                        how to delete the working directory on exit:
+                                        "on-error": delete working directory on exit
+                                        unless an error occurs, "always": always
+                                        delete working directory on exit, "never":
+                                        never delete working directory.
+                                        [default: on-error]
+
+      Cassandra:
+        --cluster-name TEXT             name of the Cassandra cluster
+                                        [default: test-cluster]
+        --cassandra-version TEXT        Cassandra version to run  [default: 4.1.0]
+        --topology DCS RACKS NODES      number of data centers, racks per data
+                                        center, and nodes per rack.
+                                        [default: 2, 3, 1]
+        -j, --exporter-jar PATH         path of the cassandra-exporter jar to use,
+                                        either agent or standalone builds, or one of
+                                        "agent" or "standalone" for the currently
+                                        built jar of that type in the project
+                                        directory (assumes that the sources for this
+                                        test tool are in the standard location within
+                                        the project, and that the jar(s) have been
+                                        built).  [default: agent]
+        -s, --schema PATH               path of the CQL schema YAML file to apply on
+                                        cluster start. The YAML file must contain a
+                                        list of CQL statement strings, which are
+                                        applied in order.  [default:
+                                        /root/source/forks/cassandra-exporter/test/schema.yaml]
+
+      Prometheus Archive: [mutually exclusive]
+        --prometheus-version TAG
+        --prometheus-archive PATH/URL
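A representative invocation, assuming the agent jar has already been built (for example via `mvn package`):

    python test_tool.py demo --topology 1 1 2

Here `--topology 1 1 2` requests one data center containing a single rack of two nodes; pass `-j standalone` to exercise the standalone build instead. The cluster runs until interrupted with ctrl-c.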
+
 ## Unstable, Missing & Future Features
 
 See the [project issue tracker](https://github.com/instaclustr/cassandra-exporter/issues) for a complete list.

From f643f90d5eb5c95fdb06ebf13418982483f280bf Mon Sep 17 00:00:00 2001
From: John Del Castillo
Date: Wed, 8 Feb 2023 12:14:23 +1100
Subject: [PATCH 17/19] More doco

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 72ff06e..90d9007 100644
--- a/README.md
+++ b/README.md
@@ -419,6 +419,7 @@ There are unit tests in the various projects which will get executed with the ma
 ### Integration test harness
 
 There is an integration test harness available in the */test/* folder.
+This harness is a work in progress, and is currently only useful for manual verification.
 
 #### Requirements
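The `dump validate` subcommand in the README additions above wraps Prometheus's promtool. A minimal sketch of that kind of check, piping a captured dump through `promtool check metrics`; the `check_dump` helper is illustrative (it assumes `promtool`, which ships with Prometheus, is on the PATH) rather than the harness's actual implementation:

    # Illustrative dump validation: lint a captured metrics dump with promtool.
    import subprocess
    from pathlib import Path

    def check_dump(dump_path: Path) -> bool:
        """Return True if `promtool check metrics` accepts the dump."""
        with open(dump_path, 'rb') as dump:
            result = subprocess.run(['promtool', 'check', 'metrics'], stdin=dump)
        return result.returncode == 0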
From 0e3d93ec9f2622f64f74bb988c1895903c192117 Mon Sep 17 00:00:00 2001
From: John Del Castillo
Date: Fri, 17 Feb 2023 12:35:10 +1100
Subject: [PATCH 18/19] remove python guff

---
 .vscode/settings.json                                |   4 ----
 test/lib/__pycache__/ccm.cpython-39.pyc              | Bin 7522 -> 0 bytes
 test/lib/__pycache__/click_helpers.cpython-39.pyc    | Bin 6225 -> 0 bytes
 test/lib/__pycache__/dump.cpython-39.pyc             | Bin 3543 -> 0 bytes
 test/lib/__pycache__/dump_tests.cpython-39.pyc       | Bin 2967 -> 0 bytes
 test/lib/__pycache__/jar_utils.cpython-39.pyc        | Bin 4400 -> 0 bytes
 test/lib/__pycache__/net.cpython-39.pyc              | Bin 564 -> 0 bytes
 test/lib/__pycache__/path_utils.cpython-39.pyc       | Bin 798 -> 0 bytes
 test/lib/__pycache__/prometheus.cpython-39.pyc       | Bin 13412 -> 0 bytes
 test/lib/__pycache__/prometheus_tests.cpython-39.pyc | Bin 4001 -> 0 bytes
 test/lib/__pycache__/schema.cpython-39.pyc           | Bin 2063 -> 0 bytes
 test/tools/__pycache__/dump.cpython-39.pyc           | Bin 3037 -> 0 bytes
 12 files changed, 4 deletions(-)
 delete mode 100644 .vscode/settings.json
 delete mode 100644 test/lib/__pycache__/ccm.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/click_helpers.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/dump.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/dump_tests.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/jar_utils.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/net.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/path_utils.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/prometheus.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/prometheus_tests.cpython-39.pyc
 delete mode 100644 test/lib/__pycache__/schema.cpython-39.pyc
 delete mode 100644 test/tools/__pycache__/dump.cpython-39.pyc

diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 0153b31..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-    "java.configuration.updateBuildConfiguration": "interactive",
-    "java.compile.nullAnalysis.mode": "disabled"
-}
\ No newline at end of file

[GIT binary patches omitted: the eleven deleted __pycache__/*.pyc files listed above are compiled Python bytecode; their binary patch data contains nothing reviewable.]
From 84cb7cdaaa84628a1a40fbcd154d8de0d19edf44 Mon Sep 17 00:00:00 2001
From: johndelcastillo
Date: Tue, 7 Mar 2023 10:10:22 +1100
Subject: [PATCH 19/19] Update readme

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cf24cda..8a79cae 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 *Project Status: beta*
 
-# Note - The Cassandra-Exporter tool does not support Cassandra 4.0 or newer, see the compatibility section for more details on supported versions.
+# Note - The Cassandra-Exporter tool does not support all versions of Cassandra; see the compatibility section for details on supported versions.
 
 ## Introduction