Fix pylint running on test source files (#1315)
Shrews authored Oct 3, 2023
1 parent 8945491 commit 60cfaa1
Showing 12 changed files with 104 additions and 101 deletions.
Empty file added test/integration/__init__.py
Empty file.
38 changes: 20 additions & 18 deletions test/integration/conftest.py
@@ -1,36 +1,38 @@
 import json
 import os
 import subprocess
-import yaml
 import pathlib
-import pytest
-import pexpect
 import random

 from string import ascii_lowercase

+import pexpect
+import pytest
+import yaml
+
 from ansible_runner.config.runner import RunnerConfig

 here = pathlib.Path(__file__).parent


 @pytest.fixture(scope='function')
 def rc(tmp_path):
-    rc = RunnerConfig(str(tmp_path))
-    rc.suppress_ansible_output = True
-    rc.expect_passwords = {
+    conf = RunnerConfig(str(tmp_path))
+    conf.suppress_ansible_output = True
+    conf.expect_passwords = {
         pexpect.TIMEOUT: None,
         pexpect.EOF: None
     }
-    rc.cwd = str(tmp_path)
-    rc.env = {}
-    rc.job_timeout = 10
-    rc.idle_timeout = 0
-    rc.pexpect_timeout = 2.
-    rc.pexpect_use_poll = True
-    return rc
+    conf.cwd = str(tmp_path)
+    conf.env = {}
+    conf.job_timeout = 10
+    conf.idle_timeout = 0
+    conf.pexpect_timeout = 2.
+    conf.pexpect_use_poll = True
+    return conf


-class CompletedProcessProxy(object):
+class CompletedProcessProxy:

     def __init__(self, result):
         self.result = result
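
Two pylint checks drive the renames above: the local variable rc shadowed the name of the enclosing rc fixture, tripping redefined-outer-name (W0621), and class CompletedProcessProxy(object) carried Python 2-style explicit object inheritance, tripping useless-object-inheritance (R0205). A minimal sketch of the shadowing case, with hypothetical values rather than this repo's code:

    import pytest


    @pytest.fixture
    def rc():
        # W0621: the local name 'rc' redefines the module-level fixture 'rc'.
        rc = {"job_timeout": 10}
        return rc


    @pytest.fixture
    def rc_clean():
        conf = {"job_timeout": 10}  # distinct local name, no warning
        return conf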
@@ -54,7 +56,7 @@ def yaml(self):


 @pytest.fixture(scope='function')
-def cli(request):
+def cli():
     def run(args, *a, **kw):
         if not kw.pop('bare', None):
             args = ['ansible-runner'] + args
@@ -73,7 +75,7 @@ def run(args, *a, **kw):
         })

         try:
-            ret = CompletedProcessProxy(subprocess.run(' '.join(args), shell=True, *a, **kw))
+            ret = CompletedProcessProxy(subprocess.run(' '.join(args), check=kw.pop('check'), shell=True, *a, **kw))
         except subprocess.CalledProcessError as err:
             pytest.fail(
                 f"Running {err.cmd} resulted in a non-zero return code: {err.returncode} - stdout: {err.stdout}, stderr: {err.stderr}"
@@ -84,7 +86,7 @@ def run(args, *a, **kw):


 @pytest.fixture
-def container_image(request, cli, tmp_path):
+def container_image(request, cli, tmp_path): # pylint: disable=W0621
     try:
         containerized = request.getfixturevalue('containerized')
         if not containerized:
@@ -104,7 +106,7 @@ def container_image(request, cli, tmp_path):
         bare=True,
     )

-    wheel = next(tmp_path.glob('*.whl'))
+    wheel = next(tmp_path.glob('*.whl')) # pylint: disable=R1708
 
     runtime = request.getfixturevalue('runtime')
     dockerfile_path = tmp_path / 'Dockerfile'
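
The suppressed R1708 (stop-iteration-return) fires because a bare next() inside a generator (and this fixture is one, since it yields) raises StopIteration when the glob matches nothing, which PEP 479 turns into a RuntimeError. A sketch of the rewrite the check would otherwise suggest, using a sentinel default; the helper name is hypothetical:

    import pathlib


    def wheel_for(tmp_path: pathlib.Path):
        # next(..., None) cannot raise StopIteration, so pylint R1708 is
        # satisfied and the failure mode becomes an explicit exception.
        wheel = next(tmp_path.glob('*.whl'), None)
        if wheel is None:
            raise FileNotFoundError(f'no wheel built under {tmp_path}')
        yield wheel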
Empty file.
3 changes: 1 addition & 2 deletions test/integration/containerized/test_cli_containerized.py
@@ -3,12 +3,11 @@
 import signal
 import sys

-from test.utils.common import iterate_timeout
 from uuid import uuid4

 import pytest

+from test.utils.common import iterate_timeout


 @pytest.mark.test_all_runtimes
 def test_module_run(cli, project_fixtures, runtime, container_image):
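
The moved import follows the grouping pylint enforces as wrong-import-order (C0411): standard library, then third-party, then first-party. Sketched with this file's imports (the test.utils path resolves only inside this repo's test tree):

    # Standard library first...
    import signal
    import sys
    from uuid import uuid4

    # ...then third-party packages...
    import pytest

    # ...then first-party modules.
    from test.utils.common import iterate_timeout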
6 changes: 3 additions & 3 deletions test/integration/containerized/test_container_management.py
@@ -14,7 +14,7 @@
 def is_running(cli, runtime, container_name):
     cmd = [runtime, 'ps', '-aq', '--filter', f'name={container_name}']
     r = cli(cmd, bare=True)
-    output = '{}{}'.format(r.stdout, r.stderr)
+    output = f'{r.stdout}{r.stderr}'
     print(' '.join(cmd))
     print(output)
     return output.strip()
@@ -24,7 +24,7 @@ class CancelStandIn:
     def __init__(self, runtime, cli, container_name, delay=0.2):
         self.runtime = runtime
         self.cli = cli
-        self.delay = 0.2
+        self.delay = delay
         self.container_name = container_name
         self.checked_running = False
         self.start_time = None
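
Unlike the surrounding lint cleanups, this hunk fixes a real bug: the hardcoded 0.2 meant the delay argument was accepted and then ignored. A tiny self-contained check of the corrected behavior (illustrative, not a test from this commit):

    class CancelStandIn:
        def __init__(self, delay=0.2):
            self.delay = delay  # previously hardcoded to 0.2


    def test_delay_is_respected():
        assert CancelStandIn(delay=1.5).delay == 1.5
        assert CancelStandIn().delay == 0.2


    test_delay_is_respected()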
@@ -37,7 +37,7 @@ def cancel(self):
             return False
         # guard against false passes by checking for running container
         if not self.checked_running:
-            for i in range(5):
+            for _ in range(5):
                 if is_running(self.cli, self.runtime, self.container_name):
                     break
                 time.sleep(0.2)
2 changes: 1 addition & 1 deletion test/integration/test___main__.py
@@ -16,7 +16,7 @@ def random_string():


 def random_json(keys=None):
-    data = dict()
+    data = {}
     if keys:
         for key in keys:
             data[key] = random_string()
6 changes: 3 additions & 3 deletions test/integration/test_config.py
@@ -1,8 +1,8 @@
+import os
+
 from ansible_runner.config._base import BaseConfig
 from ansible_runner.interface import run

-import os
-

 def test_combine_python_and_file_settings(project_fixtures):
     rc = BaseConfig(private_data_dir=str(project_fixtures / 'job_env'), settings={'job_timeout': 40}, container_image='bar')
@@ -32,7 +32,7 @@ def test_custom_stdout_callback_via_host_environ(project_fixtures, mocker):
     assert '"msg": "Hello world!"' in stdout, stdout


-def test_custom_stdout_callback_via_envvars(project_fixtures, mocker):
+def test_custom_stdout_callback_via_envvars(project_fixtures):
     res = run(private_data_dir=str(project_fixtures / 'debug'), playbook='debug.yml', envvars={'ANSIBLE_STDOUT_CALLBACK': 'minimal'})
     with res.stdout as f:
         stdout = f.read()
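
Dropping mocker here clears unused-argument (W0613): the test never calls the fixture, so the parameter was dead weight. When a fixture is requested purely for its side effects, the alternative is a targeted disable, sketched below with a hypothetical fixture:

    import pytest


    @pytest.fixture
    def tracing_enabled():
        return True  # imagine this flips on verbose logging as a side effect


    # Requested only for its side effect, so pylint sees an unused argument.
    def test_with_side_effect(tracing_enabled):  # pylint: disable=W0613
        assert 1 + 1 == 2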
49 changes: 25 additions & 24 deletions test/integration/test_display_callback.py
@@ -4,9 +4,10 @@
 import os
 import yaml

+import pytest
+
 from ansible_runner.interface import init_runner

-import pytest

 HERE = os.path.abspath(os.path.dirname(__file__))

@@ -71,9 +72,9 @@ def executor(tmp_path, request):
     {'ANSIBLE_CALLBACK_PLUGINS': ''}],
     ids=['local-callback-plugin', 'no-callback-plugin']
 )
-def test_callback_plugin_receives_events(executor, event, playbook, envvars):
+def test_callback_plugin_receives_events(executor, event, playbook, envvars): # pylint: disable=W0613,W0621
     executor.run()
-    assert len(list(executor.events))
+    assert list(executor.events)
     assert event in [task['event'] for task in executor.events]
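
The # pylint: disable=W0613,W0621 comments repeated through this file all stem from one pytest idiom: the parametrized playbook/envvars values are consumed indirectly by the executor fixture (via request), so the test body never touches them, and the executor parameter shadows the module-level fixture of the same name. A condensed sketch of the pattern:

    import pytest


    @pytest.fixture
    def executor(request):
        # The fixture pulls the parametrized value itself...
        return request.getfixturevalue('playbook')


    @pytest.mark.parametrize('playbook', ['- hosts: all'])
    def test_runs(executor, playbook):  # pylint: disable=W0613,W0621
        # ...so 'playbook' is unused here (W0613), and 'executor' shadows
        # the fixture function defined above (W0621).
        assert executor == '- hosts: all'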


@@ -156,9 +157,9 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
       ignore_errors: yes
 '''}, # noqa
 ])
-def test_callback_plugin_no_log_filters(executor, playbook):
+def test_callback_plugin_no_log_filters(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
-    assert len(list(executor.events))
+    assert list(executor.events)
     assert 'SENSITIVE' not in json.dumps(list(executor.events))
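
The assert len(list(...)) to assert list(...) rewrites, repeated below, answer pylint's implicit-booleaness checks (the len-as-condition family): an empty sequence is already falsy, so wrapping it in len() adds nothing. For example:

    events = [{'event': 'playbook_on_start'}]

    assert len(events) > 0   # style pylint flags in boolean contexts
    assert events            # preferred: non-empty sequences are truthy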


@@ -175,7 +176,7 @@ def test_callback_plugin_no_log_filters(executor, playbook):
     - uri: url=https://example.org url_username="PUBLIC" url_password="PRIVATE"
 '''}, # noqa
 ])
-def test_callback_plugin_task_args_leak(executor, playbook):
+def test_callback_plugin_task_args_leak(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
     events = list(executor.events)
     assert events[0]['event'] == 'playbook_on_start'
@@ -212,7 +213,7 @@ def test_callback_plugin_task_args_leak(executor, playbook):
         }, # noqa
     ],
 )
-def test_resolved_actions(executor, playbook, skipif_pre_ansible212):
+def test_resolved_actions(executor, playbook, skipif_pre_ansible212): # pylint: disable=W0613,W0621
     executor.run()
     events = list(executor.events)

@@ -237,7 +238,7 @@ def test_resolved_actions(executor, playbook, skipif_pre_ansible212):
     - debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
 '''}, # noqa
 ])
-def test_callback_plugin_censoring_does_not_overwrite(executor, playbook):
+def test_callback_plugin_censoring_does_not_overwrite(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
     events = list(executor.events)
     assert events[0]['event'] == 'playbook_on_start'
@@ -246,7 +247,7 @@ def test_callback_plugin_censoring_does_not_overwrite(executor, playbook):
     # task 1
     assert events[2]['event'] == 'playbook_on_task_start'
     # Ordering of task and item events may differ randomly
-    assert set(['runner_on_start', 'runner_item_on_ok', 'runner_on_ok']) == set([data['event'] for data in events[3:6]])
+    assert set(['runner_on_start', 'runner_item_on_ok', 'runner_on_ok']) == {data['event'] for data in events[3:6]}

     # task 2 no_log=True
     assert events[6]['event'] == 'playbook_on_task_start'
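
Replacing set([...]) around a list comprehension with a set comprehension answers consider-using-set-comprehension (R1718), which flags building a throwaway list only to convert it. For example:

    events = [{'event': 'runner_on_start'}, {'event': 'runner_on_ok'}]

    old_style = set([e['event'] for e in events])  # R1718: extra list pass
    new_style = {e['event'] for e in events}       # one pass, same result
    assert old_style == new_style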
@@ -264,9 +265,9 @@ def test_callback_plugin_censoring_does_not_overwrite(executor, playbook):
     - shell: echo "Hello, World!"
 '''}, # noqa
 ])
-def test_callback_plugin_strips_task_environ_variables(executor, playbook):
+def test_callback_plugin_strips_task_environ_variables(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
-    assert len(list(executor.events))
+    assert list(executor.events)
     for event in list(executor.events):
         assert os.environ['PATH'] not in json.dumps(event)

@@ -282,7 +283,7 @@ def test_callback_plugin_strips_task_environ_variables(executor, playbook):
         foo: "bar"
 '''}, # noqa
 ])
-def test_callback_plugin_saves_custom_stats(executor, playbook):
+def test_callback_plugin_saves_custom_stats(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
     for event in executor.events:
         event_data = event.get('event_data', {})
@@ -308,9 +309,9 @@ def test_callback_plugin_saves_custom_stats(executor, playbook):
           - my_handler
 '''}, # noqa
 ])
-def test_callback_plugin_records_notify_events(executor, playbook):
+def test_callback_plugin_records_notify_events(executor, playbook): # pylint: disable=W0613,W0621
     executor.run()
-    assert len(list(executor.events))
+    assert list(executor.events)
     notify_events = [x for x in executor.events if x['event'] == 'playbook_on_notify']
     assert len(notify_events) == 1
     assert notify_events[0]['event_data']['handler'] == 'my_handler'
@@ -332,12 +333,12 @@ def test_callback_plugin_records_notify_events(executor, playbook):
         url_password: "{{ pw }}"
 '''}, # noqa
 ])
-def test_module_level_no_log(executor, playbook):
+def test_module_level_no_log(executor, playbook): # pylint: disable=W0613,W0621
     # It's possible for `no_log=True` to be defined at the _module_ level,
     # e.g., for the URI module password parameter
     # This test ensures that we properly redact those
     executor.run()
-    assert len(list(executor.events))
+    assert list(executor.events)
     assert 'john-jacob-jingleheimer-schmidt' in json.dumps(list(executor.events))
     assert 'SENSITIVE' not in json.dumps(list(executor.events))

@@ -354,15 +355,15 @@ def test_output_when_given_invalid_playbook(tmp_path):
     #
     # But no test validated it. This does that.
     private_data_dir = str(tmp_path)
-    executor = init_runner(
+    ex = init_runner(
         private_data_dir=private_data_dir,
         inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
         envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"},
         playbook=os.path.join(private_data_dir, 'fake_playbook.yml')
     )

-    executor.run()
-    with executor.stdout as f:
+    ex.run()
+    with ex.stdout as f:
         stdout = f.read()
     assert "ERROR! the playbook:" in stdout
     assert "could not be found" in stdout
@@ -391,20 +392,20 @@ def test_output_when_given_non_playbook_script(tmp_path):
     with open(os.path.join(private_data_dir, "env", "settings"), 'w') as settings_file:
         settings_file.write("pexpect_timeout: 0.2")

-    executor = init_runner(
+    ex = init_runner(
         private_data_dir=private_data_dir,
         inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
         envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"}
     )

-    executor.run()
+    ex.run()

-    with executor.stdout as f:
+    with ex.stdout as f:
         stdout = f.readlines()
     assert stdout[0].strip() == "hi world"
     assert stdout[1].strip() == "goodbye world"

-    events = list(executor.events)
+    events = list(ex.events)

     assert len(events) == 2
     assert events[0]['event'] == 'verbose'
@@ -425,7 +426,7 @@ def test_output_when_given_non_playbook_script(tmp_path):
         msg: "{{ ('F' * 150) | list }}"
 '''}, # noqa
 ])
-def test_large_stdout_parsing_when_using_json_output(executor, playbook):
+def test_large_stdout_parsing_when_using_json_output(executor, playbook): # pylint: disable=W0613,W0621
     # When the json flag is used, it is possible to output more data than
     # pexpect's maxread default of 2000 characters. As a result, if not
     # handled properly, the stdout can end up being corrupted with partial
12 changes: 5 additions & 7 deletions test/integration/test_events.py
@@ -28,8 +28,7 @@ def test_basic_events(containerized, runtime, tmp_path, container_image, is_run_
     thread.join() # ensure async run finishes

     event_types = [x['event'] for x in r.events if x['event'] != 'verbose']
-    okay_events = [x for x in filter(lambda x: 'event' in x and x['event'] == 'runner_on_ok',
-                                     r.events)]
+    okay_events = list(filter(lambda x: 'event' in x and x['event'] == 'runner_on_ok', r.events))

     assert event_types[0] == 'playbook_on_start'
     assert "playbook_on_play_start" in event_types
@@ -61,7 +60,7 @@ def test_basic_serializeable(tmp_path):
     r = run(private_data_dir=str(tmp_path),
             inventory=inv,
             playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
-    events = [x for x in r.events]
+    events = list(r.events)
     json.dumps(events)


@@ -79,7 +78,7 @@ def test_event_omission(tmp_path):
             continue
         events.append(x)

-    assert not any([x['event_data'] for x in events])
+    assert not any(x['event_data'] for x in events)


 def test_event_omission_except_failed(tmp_path):
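
Dropping the square brackets inside any(...) answers use-a-generator (R1729): any() short-circuits on the first truthy element, but only if it is fed a lazy generator rather than a fully built list. For example:

    events = [{'event_data': {}}, {'event_data': {'changed': True}}]

    # The list version materializes every element before any() looks at one.
    assert any([e['event_data'] for e in events])
    # The generator version stops as soon as a truthy value appears.
    assert any(e['event_data'] for e in events)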
@@ -101,12 +100,11 @@ def test_event_omission_except_failed(tmp_path):
     assert len(all_event_datas) == 1


-def test_runner_on_start(rc, tmp_path):
+def test_runner_on_start(tmp_path):
     r = run(private_data_dir=str(tmp_path),
             inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
             playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
-    start_events = [x for x in filter(lambda x: 'event' in x and x['event'] == 'runner_on_start',
-                                      r.events)]
+    start_events = list(filter(lambda x: 'event' in x and x['event'] == 'runner_on_start', r.events))
     assert len(start_events) == 1
