From 7d52cd6d4308e222e26c29046d4cb788fad4c6f0 Mon Sep 17 00:00:00 2001 From: Nathan Weinberg Date: Wed, 3 Mar 2021 15:30:10 -0500 Subject: [PATCH] Restructured entire project --- .flake8 | 4 + .github/workflows/codecov.yml | 6 +- .github/workflows/flake8.yml | 10 +- .github/workflows/pytest.yml | 5 +- docs/README.md | 7 +- functions.py | 371 ------------------------ jeeves.py | 6 +- jeeves/__init__.py | 0 jeeves/blockers.py | 172 +++++++++++ jeeves/common.py | 70 +++++ jeeves/jobs.py | 134 +++++++++ remind.py => jeeves/remind.py | 7 +- report.py => jeeves/report.py | 10 +- requirements.txt | 1 + tests/__init__.py | 0 tests/test_blockers.py | 54 ++++ tests/test_common.py | 19 ++ test_functions.py => tests/test_jobs.py | 73 +---- tests/test_remind.py | 5 + tests/test_report.py | 5 + 20 files changed, 490 insertions(+), 469 deletions(-) create mode 100644 .flake8 delete mode 100644 functions.py create mode 100644 jeeves/__init__.py create mode 100644 jeeves/blockers.py create mode 100644 jeeves/common.py create mode 100644 jeeves/jobs.py rename remind.py => jeeves/remind.py (96%) rename report.py => jeeves/report.py (97%) create mode 100644 tests/__init__.py create mode 100644 tests/test_blockers.py create mode 100644 tests/test_common.py rename test_functions.py => tests/test_jobs.py (54%) mode change 100755 => 100644 create mode 100644 tests/test_remind.py create mode 100644 tests/test_report.py diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..1a0ed26 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +ignore = E117,E501,E722,W191 +exclude = + tests diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index d2d9b17..f6ad00e 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -11,18 +11,16 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 - - name: Set up Python 3.7 + - name: Set up Python 3.8 uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.8 - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt - name: Generate coverage report run: | - pip install pytest - pip install pytest-cov pytest --cov=./ --cov-report=xml - name: Upload coverage to Codecov uses: codecov/codecov-action@v1 diff --git a/.github/workflows/flake8.yml b/.github/workflows/flake8.yml index f203b55..3fb9813 100644 --- a/.github/workflows/flake8.yml +++ b/.github/workflows/flake8.yml @@ -8,18 +8,14 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 - - name: Set up Python 3.7 + - name: Set up Python 3.8 uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.8 - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt - name: Lint with flake8 run: | - pip install flake8 - flake8 --ignore=E117,E501,E722,W191 jeeves.py - flake8 --ignore=E117,E501,E722,W191 report.py - flake8 --ignore=E117,E501,E722,W191 remind.py - flake8 --ignore=E117,E501,E722,W191 functions.py + flake8 diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 3dad690..66ed427 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -8,15 +8,14 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v2 - - name: Set up Python 3.7 + - name: Set up Python 3.8 uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.8 - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt - name: Test with pytest run: | - 
pip install pytest pytest diff --git a/docs/README.md index 1b8932d..32033d4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -71,8 +71,13 @@ To install packages run: It is recommended you do this within a virtual environment. +## Linting +Jeeves follows a set of [PEP8](https://www.python.org/dev/peps/pep-0008/) standards in the interest of code clarity and consistency. These rules are enforced with the [flake8](https://flake8.pycqa.org/en/latest/) library. Configuration for linting resides in the `.flake8` file. + +To run linting simply run the `flake8` command within the Jeeves directory. + ## Testing -Jeeves has a small but growing test suite driven by [pytest](https://docs.pytest.org/en/latest/index.html). Currently all tests reside in the `test_functions.py` file. +Jeeves has a small but growing test suite driven by [pytest](https://docs.pytest.org/en/latest/index.html). Currently all tests reside in the `tests` directory. To run tests simply run the `pytest` command within the Jeeves directory. diff --git a/functions.py b/functions.py deleted file mode 100644 index 838bf1b..0000000 --- a/functions.py +++ /dev/null @@ -1,371 +0,0 @@ -# Shared library of functions for other Python files - -import os -import re -import datetime -import bugzilla -from jira import JIRA - - -def generate_header(source, filter_param_name=None, filter_param_value=None, remind=False): - ''' generates header - optionally takes name and value of jenkins param to filter builds by - if remind is true, header source should be blocker_file - if remind is false, header source should be job_search_fields - ''' - date = '{:%m/%d/%Y at %I:%M%p %Z}'.format(datetime.datetime.now()) - - # show only filename in remind header, not full path - if remind: - source = source.rsplit('/', 1)[-1] - - header = { - 'date': date, - 'source': source, - 'fpn': filter_param_name, - 'fpv': filter_param_value - } - return header - - -def generate_html_file(htmlcode, remind=False): - ''' generates HTML file of reminder - ''' - try: - os.makedirs('archive') - except FileExistsError: - pass - reportType = 'reminder' if remind else 'report' - filename = './archive/{}_{:%Y-%m-%d_%H-%M-%S}.html'.format( - reportType, datetime.datetime.now()) - with open(filename, 'w') as file: - file.write(htmlcode) - return filename - - -def get_bugs_dict(bug_ids, config): - ''' takes in set of bug_ids and returns dictionary with - bug_ids as keys and API data as values - a bug_id value of 0 will be ignored - ''' - - # initialize bug dictionary - bug_dict = {} - - # API connection does not work if '/' present at end of URL string - parsed_bz_url = config['bz_url'].rstrip('/') - bz_api = None - - # iterate through bug ids from set - for bug_id in bug_ids: - - # a bug_id value of 0 is used as a placeholder, not a valid bug - # skip as there is no API data to be fetched in this case - if bug_id == 0: - continue - - # get bug info from bugzilla API - try: - - # initialize connection if it has not yet been done (either first iteration or previously failed) - if bz_api is None: - bz_api = bugzilla.Bugzilla(parsed_bz_url) - - bug = bz_api.getbug(bug_id) - bug_status = '[' + bug.status + ']' - bug_summary = bug.summary - bug_name = ' '.join([bug_status, bug_summary]) - except Exception as e: - print("Bugzilla API Call Error: ", e) - bug_name = "BZ#" + str(bug_id) - bz_api = None - finally: - bug_url = config['bz_url'] + "/show_bug.cgi?id=" + str(bug_id) - bug_dict[bug_id] = {'bug_name': bug_name, 'bug_url': bug_url} - - return bug_dict - - -def 
get_bugs_set(blockers): - ''' takes in blockers dict and generates a set of all unique bug ids - excludes 0 if it is present - passing an empty dict will result in an empty set - ''' - bug_set = set() - for job in blockers: - - # try to fetch 'bz' field from job - try: - bz = blockers[job]['bz'] - bug_set.update(bz) - - # failure means data was not formatted correctly for given job - log and skip - except Exception as e: - print("Error getting bug IDs from blockers file for job {}: {}".format(job, e)) - continue - - # discard bug_id value of 0 from set if present as this is not a valid bug - bug_set.discard(0) - return bug_set - - -def get_jenkins_job_info(server, job_name, filter_param_name=None, filter_param_value=None): - ''' takes in jenkins server object and job name - optionally takes name and value of jenkins param to filter builds by - returns dict of API info for given job if success - returns False if failure - ''' - - # set default value for job_info for cased exception handling - job_info = {} - - try: - job_info = server.get_job_info(job_name) - job_url = job_info['url'] - lcb_num = job_info['lastCompletedBuild']['number'] - tempest_tests_failed = None - build_info = server.get_build_info(job_name, lcb_num) - build_actions = build_info['actions'] - for action in build_actions: - if action.get('_class') in ['com.tikal.jenkins.plugins.multijob.MultiJobParametersAction', 'hudson.model.ParametersAction']: - build_parameters = action['parameters'] - elif action.get('_class') == 'hudson.tasks.junit.TestResultAction': - tempest_tests_failed = action['failCount'] - - # if desired, get last completed build with custom parameter and value - if filter_param_name is not None and filter_param_value is not None: - api_param_value = [param['value'] for param in build_parameters if filter_param_name == param.get('name', '')][0] - while api_param_value != filter_param_value: - lcb_num = lcb_num - 1 - build_info = server.get_build_info(job_name, lcb_num) - build_actions = build_info['actions'] - for action in build_actions: - if action.get('_class') in ['com.tikal.jenkins.plugins.multijob.MultiJobParametersAction', 'hudson.model.ParametersAction']: - build_parameters = action['parameters'] - break - api_param_value = [param['value'] for param in build_parameters if filter_param_name == param.get('name', '')][0] - - build_time = build_info.get('timestamp') - build_days_ago = (datetime.datetime.now() - datetime.datetime.fromtimestamp(build_time / 1000)).days - lcb_url = build_info['url'] - lcb_result = build_info['result'] - composes = [str(action['html']).split('core_puddle:')[1].split('<')[0].strip() for action in build_actions if 'core_puddle' in action.get('html', '')] - - # No composes could be found; likely a failed job where the 'core_puddle' var was never calculated - if composes == []: - compose = "Could not find compose" - second_compose = None - # Two composes found - job is likely Update or Upgrade - elif len(composes) == 2: - compose = composes[0] - second_compose = composes[1] - # One compose found - else: - compose = composes[0] - second_compose = None - - except Exception as e: - - # No "Last Completed Build" found - # Checks for len <= 1 as running builds are included in the below query - if len(job_info.get('builds')) <= 1: - lcb_num = None - lcb_url = None - compose = "N/A" - second_compose = None - build_days_ago = "N/A" - lcb_result = "NO_KNOWN_BUILDS" - tempest_tests_failed = None - - # Unknown error, skip job - else: - print("Jenkins API call error on job {}: {} - 
skipping...".format(job_name, e)) - return False - - jenkins_api_info = { - 'job_url': job_url, - 'lcb_num': lcb_num, - 'lcb_url': lcb_url, - 'compose': compose, - 'second_compose': second_compose, - 'lcb_result': lcb_result, - 'build_days_ago': build_days_ago, - 'tempest_tests_failed': tempest_tests_failed - } - return jenkins_api_info - - -def get_jenkins_jobs(server, job_search_fields): - ''' takes in a Jenkins server object and job_search_fields string - returns list of jobs with given search field as part of their name - ''' - - # parse list of search fields - fields = job_search_fields.split(',') - fields_length = len(fields) - - # remove spacing from strings - for i in range(fields_length): - fields[i] = fields[i].strip(' ') - - # check for fields that contain valid regex - relevant_jobs = [] - supported_versions = ['13', '16.1', '16.2'] - for field in fields: - try: - - # fetch all jobs from server that match the given regex or search - all_jobs = server.get_job_info_regex(field) - - # parse out all jobs that do not contain any search field and/or are not a supported version - for job in all_jobs: - job_name = job['name'] - if any(supported_version in job_name for supported_version in supported_versions): - relevant_jobs.append(job) - - except Exception as e: - print("Error compiling regex: {} - skipping this search field...".format(e)) - - return relevant_jobs - - -def get_tickets_dict(ticket_ids, config): - ''' takes in set of ticket_ids and returns dictionary with - ticket_ids as keys and API data as values - a ticket_id with a value of 0 will be ignored - ''' - - # initialize ticket dictionary - ticket_dict = {} - - # initialize jira variable and config options - auth = (config['jira_username'], config['jira_password']) - options = { - "server": config['jira_url'], - "verify": config['certificate'] - } - jira = None - - # iterate through ticket ids from set - for ticket_id in ticket_ids: - - # a ticket_id value of 0 is used as a placeholder, not a valid ticket - # skip as there is no API data to be fetched in this case - if ticket_id == 0: - continue - - # get ticket info from jira API - try: - - # initialize connection if it has not yet been done (either first iteration or previously failed) - if jira is None: - jira = JIRA(auth=auth, options=options) - - issue = jira.issue(ticket_id) - ticket_status = '[' + str(issue.fields.status) + ']' - ticket_summary = issue.fields.summary - ticket_name = ' '.join([ticket_status.upper(), ticket_summary]) - except Exception as e: - print("Jira API Call Error: ", e) - ticket_name = ticket_id - jira = None - finally: - ticket_url = config['jira_url'] + "/browse/" + str(ticket_id) - ticket_dict[ticket_id] = { - 'ticket_name': ticket_name, - 'ticket_url': ticket_url - } - - # close Jira connection if open - if jira is not None: - jira.close() - - return ticket_dict - - -def get_tickets_set(blockers): - ''' takes in blockers object and generates a set of all unique jira ticket ids - excluding 0 if it is present - passing an empty dict will result in an empty set - ''' - jira_set = set() - for job in blockers: - - # try to fetch 'jira' field from job - try: - jira = blockers[job]['jira'] - jira_set.update(jira) - - # failure means data was not formatted correctly for given job - log and skip - except Exception as e: - print("Error getting jira IDs from blockers file for job {}: {}".format(job, e)) - continue - - # discard ticket_id value of 0 from set if present as this is not a valid ticket - jira_set.discard(0) - return jira_set - - -def 
get_osp_version(job_name): - ''' gets osp version from job name via regex - returns None if no version is found - ''' - version = re.search(r'\d+\.*\d*', job_name) - if version is None: - return None - return version.group() - - -def get_other_blockers(blockers, job_name): - ''' takes in blockers object and job name - returns list of 'other' blockers - ''' - other = [] - other_blockers = blockers[job_name].get('other') - if other_blockers is None: - return other - for blocker in other_blockers: - other.append({'other_name': blocker.get('name', 'Link'), 'other_url': blocker.get('url', None)}) - return other - - -def has_blockers(blockers, job_name): - ''' returns True if job_name in blockers has any defined blockers - returns False otherwise - ''' - is_bz = blockers[job_name].get('bz', [0]) - is_jira = blockers[job_name].get('jira', [0]) - is_other = blockers[job_name].get('other', [0]) - if (is_bz == [0]) and (is_jira == [0]) and (is_other == [0]): - return False - return True - - -def percent(part, whole): - ''' basic percent function - ''' - return round(100 * float(part) / float(whole), 1) - - -def validate_config(config, no_email): - ''' validates config fields - raises exception if required field is not present - ''' - required_fields = [ - 'jenkins_url', - 'job_search_fields', - 'bz_url', - 'jira_url', - 'certificate' - ] - - if not no_email: - required_fields.append('email_from') - required_fields.append('email_subject') - required_fields.append('email_to') - required_fields.append('smtp_host') - - for field in required_fields: - if config.get(field) is None: - raise Exception('field "{}" is not defined'.format(field)) - return None diff --git a/jeeves.py b/jeeves.py index 0cc1f8b..b68bf23 100755 --- a/jeeves.py +++ b/jeeves.py @@ -6,9 +6,9 @@ import jenkins import argparse -from report import run_report -from remind import run_remind -from functions import generate_header, validate_config +from jeeves.report import run_report +from jeeves.remind import run_remind +from jeeves.common import generate_header, validate_config os.environ['PYTHONHTTPSVERIFY'] = '0' diff --git a/jeeves/__init__.py b/jeeves/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jeeves/blockers.py b/jeeves/blockers.py new file mode 100644 index 0000000..f4ec972 --- /dev/null +++ b/jeeves/blockers.py @@ -0,0 +1,172 @@ +# library functions for handling blocker data + +import bugzilla +from jira import JIRA + + +def get_bugs_dict(bug_ids, config): + ''' takes in set of bug_ids and returns dictionary with + bug_ids as keys and API data as values + a bug_id value of 0 will be ignored + ''' + + # initialize bug dictionary + bug_dict = {} + + # API connection does not work if '/' present at end of URL string + parsed_bz_url = config['bz_url'].rstrip('/') + bz_api = None + + # iterate through bug ids from set + for bug_id in bug_ids: + + # a bug_id value of 0 is used as a placeholder, not a valid bug + # skip as there is no API data to be fetched in this case + if bug_id == 0: + continue + + # get bug info from bugzilla API + try: + + # initialize connection if it has not yet been done (either first iteration or previously failed) + if bz_api is None: + bz_api = bugzilla.Bugzilla(parsed_bz_url) + + bug = bz_api.getbug(bug_id) + bug_status = '[' + bug.status + ']' + bug_summary = bug.summary + bug_name = ' '.join([bug_status, bug_summary]) + except Exception as e: + print("Bugzilla API Call Error: ", e) + bug_name = "BZ#" + str(bug_id) + bz_api = None + finally: + bug_url = config['bz_url'] + 
"/show_bug.cgi?id=" + str(bug_id) + bug_dict[bug_id] = {'bug_name': bug_name, 'bug_url': bug_url} + + return bug_dict + + +def get_bugs_set(blockers): + ''' takes in blockers dict and generates a set of all unique bug ids + excludes 0 if it is present + passing an empty dict will result in an empty set + ''' + bug_set = set() + for job in blockers: + + # try to fetch 'bz' field from job + try: + bz = blockers[job]['bz'] + bug_set.update(bz) + + # failure means data was not formatted correctly for given job - log and skip + except Exception as e: + print("Error getting bug IDs from blockers file for job {}: {}".format(job, e)) + continue + + # discard bug_id value of 0 from set if present as this is not a valid bug + bug_set.discard(0) + return bug_set + + +def get_tickets_dict(ticket_ids, config): + ''' takes in set of ticket_ids and returns dictionary with + ticket_ids as keys and API data as values + a ticket_id with a value of 0 will be ignored + ''' + + # initialize ticket dictionary + ticket_dict = {} + + # initialize jira variable and config options + auth = (config['jira_username'], config['jira_password']) + options = { + "server": config['jira_url'], + "verify": config['certificate'] + } + jira = None + + # iterate through ticket ids from set + for ticket_id in ticket_ids: + + # a ticket_id value of 0 is used as a placeholder, not a valid ticket + # skip as there is no API data to be fetched in this case + if ticket_id == 0: + continue + + # get ticket info from jira API + try: + + # initialize connection if it has not yet been done (either first iteration or previously failed) + if jira is None: + jira = JIRA(auth=auth, options=options) + + issue = jira.issue(ticket_id) + ticket_status = '[' + str(issue.fields.status) + ']' + ticket_summary = issue.fields.summary + ticket_name = ' '.join([ticket_status.upper(), ticket_summary]) + except Exception as e: + print("Jira API Call Error: ", e) + ticket_name = ticket_id + jira = None + finally: + ticket_url = config['jira_url'] + "/browse/" + str(ticket_id) + ticket_dict[ticket_id] = { + 'ticket_name': ticket_name, + 'ticket_url': ticket_url + } + + # close Jira connection if open + if jira is not None: + jira.close() + + return ticket_dict + + +def get_tickets_set(blockers): + ''' takes in blockers object and generates a set of all unique jira ticket ids + excluding 0 if it is present + passing an empty dict will result in an empty set + ''' + jira_set = set() + for job in blockers: + + # try to fetch 'jira' field from job + try: + jira = blockers[job]['jira'] + jira_set.update(jira) + + # failure means data was not formatted correctly for given job - log and skip + except Exception as e: + print("Error getting jira IDs from blockers file for job {}: {}".format(job, e)) + continue + + # discard ticket_id value of 0 from set if present as this is not a valid ticket + jira_set.discard(0) + return jira_set + + +def get_other_blockers(blockers, job_name): + ''' takes in blockers object and job name + returns list of 'other' blockers + ''' + other = [] + other_blockers = blockers[job_name].get('other') + if other_blockers is None: + return other + for blocker in other_blockers: + other.append({'other_name': blocker.get('name', 'Link'), 'other_url': blocker.get('url', None)}) + return other + + +def has_blockers(blockers, job_name): + ''' returns True if job_name in blockers has any defined blockers + returns False otherwise + ''' + is_bz = blockers[job_name].get('bz', [0]) + is_jira = blockers[job_name].get('jira', [0]) + is_other = 
blockers[job_name].get('other', [0]) + if (is_bz == [0]) and (is_jira == [0]) and (is_other == [0]): + return False + return True diff --git a/jeeves/common.py b/jeeves/common.py new file mode 100644 index 0000000..7e42b91 --- /dev/null +++ b/jeeves/common.py @@ -0,0 +1,70 @@ +# common functions used throughout jeeves + +import os +import datetime + + +def generate_header(source, filter_param_name=None, filter_param_value=None, remind=False): + ''' generates header + optionally takes name and value of jenkins param to filter builds by + if remind is true, header source should be blocker_file + if remind is false, header source should be job_search_fields + ''' + date = '{:%m/%d/%Y at %I:%M%p %Z}'.format(datetime.datetime.now()) + + # show only filename in remind header, not full path + if remind: + source = source.rsplit('/', 1)[-1] + + header = { + 'date': date, + 'source': source, + 'fpn': filter_param_name, + 'fpv': filter_param_value + } + return header + + +def generate_html_file(htmlcode, remind=False): + ''' generates HTML file of reminder + ''' + try: + os.makedirs('archive') + except FileExistsError: + pass + reportType = 'reminder' if remind else 'report' + filename = './archive/{}_{:%Y-%m-%d_%H-%M-%S}.html'.format( + reportType, datetime.datetime.now()) + with open(filename, 'w') as file: + file.write(htmlcode) + return filename + + +def percent(part, whole): + ''' basic percent function + ''' + return round(100 * float(part) / float(whole), 1) + + +def validate_config(config, no_email): + ''' validates config fields + raises exception if required field is not present + ''' + required_fields = [ + 'jenkins_url', + 'job_search_fields', + 'bz_url', + 'jira_url', + 'certificate' + ] + + if not no_email: + required_fields.append('email_from') + required_fields.append('email_subject') + required_fields.append('email_to') + required_fields.append('smtp_host') + + for field in required_fields: + if config.get(field) is None: + raise Exception('field "{}" is not defined'.format(field)) + return None diff --git a/jeeves/jobs.py b/jeeves/jobs.py new file mode 100644 index 0000000..290517e --- /dev/null +++ b/jeeves/jobs.py @@ -0,0 +1,134 @@ +# library functions for handling job data + +import re +import datetime + + +def get_jenkins_job_info(server, job_name, filter_param_name=None, filter_param_value=None): + ''' takes in jenkins server object and job name + optionally takes name and value of jenkins param to filter builds by + returns dict of API info for given job if success + returns False if failure + ''' + + # set default value for job_info for cased exception handling + job_info = {} + + try: + job_info = server.get_job_info(job_name) + job_url = job_info['url'] + lcb_num = job_info['lastCompletedBuild']['number'] + tempest_tests_failed = None + build_info = server.get_build_info(job_name, lcb_num) + build_actions = build_info['actions'] + for action in build_actions: + if action.get('_class') in ['com.tikal.jenkins.plugins.multijob.MultiJobParametersAction', 'hudson.model.ParametersAction']: + build_parameters = action['parameters'] + elif action.get('_class') == 'hudson.tasks.junit.TestResultAction': + tempest_tests_failed = action['failCount'] + + # if desired, get last completed build with custom parameter and value + if filter_param_name is not None and filter_param_value is not None: + api_param_value = [param['value'] for param in build_parameters if filter_param_name == param.get('name', '')][0] + while api_param_value != filter_param_value: + lcb_num = lcb_num - 1 + 
build_info = server.get_build_info(job_name, lcb_num) + build_actions = build_info['actions'] + for action in build_actions: + if action.get('_class') in ['com.tikal.jenkins.plugins.multijob.MultiJobParametersAction', 'hudson.model.ParametersAction']: + build_parameters = action['parameters'] + break + api_param_value = [param['value'] for param in build_parameters if filter_param_name == param.get('name', '')][0] + + build_time = build_info.get('timestamp') + build_days_ago = (datetime.datetime.now() - datetime.datetime.fromtimestamp(build_time / 1000)).days + lcb_url = build_info['url'] + lcb_result = build_info['result'] + composes = [str(action['html']).split('core_puddle:')[1].split('<')[0].strip() for action in build_actions if 'core_puddle' in action.get('html', '')] + + # No composes could be found; likely a failed job where the 'core_puddle' var was never calculated + if composes == []: + compose = "Could not find compose" + second_compose = None + # Two composes found - job is likely Update or Upgrade + elif len(composes) == 2: + compose = composes[0] + second_compose = composes[1] + # One compose found + else: + compose = composes[0] + second_compose = None + + except Exception as e: + + # No "Last Completed Build" found + # Checks for len <= 1 as running builds are included in the below query + if len(job_info.get('builds')) <= 1: + lcb_num = None + lcb_url = None + compose = "N/A" + second_compose = None + build_days_ago = "N/A" + lcb_result = "NO_KNOWN_BUILDS" + tempest_tests_failed = None + + # Unknown error, skip job + else: + print("Jenkins API call error on job {}: {} - skipping...".format(job_name, e)) + return False + + jenkins_api_info = { + 'job_url': job_url, + 'lcb_num': lcb_num, + 'lcb_url': lcb_url, + 'compose': compose, + 'second_compose': second_compose, + 'lcb_result': lcb_result, + 'build_days_ago': build_days_ago, + 'tempest_tests_failed': tempest_tests_failed + } + return jenkins_api_info + + +def get_jenkins_jobs(server, job_search_fields): + ''' takes in a Jenkins server object and job_search_fields string + returns list of jobs with given search field as part of their name + ''' + + # parse list of search fields + fields = job_search_fields.split(',') + fields_length = len(fields) + + # remove spacing from strings + for i in range(fields_length): + fields[i] = fields[i].strip(' ') + + # check for fields that contain valid regex + relevant_jobs = [] + supported_versions = ['13', '16.1', '16.2'] + for field in fields: + try: + + # fetch all jobs from server that match the given regex or search + all_jobs = server.get_job_info_regex(field) + + # parse out all jobs that do not contain any search field and/or are not a supported version + for job in all_jobs: + job_name = job['name'] + if any(supported_version in job_name for supported_version in supported_versions): + relevant_jobs.append(job) + + except Exception as e: + print("Error compiling regex: {} - skipping this search field...".format(e)) + + return relevant_jobs + + +def get_osp_version(job_name): + ''' gets osp version from job name via regex + returns None if no version is found + ''' + version = re.search(r'\d+\.*\d*', job_name) + if version is None: + return None + return version.group() diff --git a/remind.py b/jeeves/remind.py similarity index 96% rename from remind.py rename to jeeves/remind.py index 866ab42..10e0fcc 100755 --- a/remind.py +++ b/jeeves/remind.py @@ -3,9 +3,10 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from smtplib import SMTP -from 
functions import generate_html_file, get_osp_version, \ - get_jenkins_job_info, get_bugs_dict, get_tickets_dict, \ - get_other_blockers + +from jeeves.common import generate_html_file +from jeeves.jobs import get_jenkins_job_info, get_osp_version +from jeeves.blockers import get_bugs_dict, get_tickets_dict, get_other_blockers def run_remind(config, blockers, server, header): diff --git a/report.py b/jeeves/report.py similarity index 97% rename from report.py rename to jeeves/report.py index 4b4306d..f1075c6 100755 --- a/report.py +++ b/jeeves/report.py @@ -1,15 +1,15 @@ +import sys import json import jinja2 -import sys from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from smtplib import SMTP from urllib.parse import quote -from functions import generate_html_file, get_bugs_dict, \ - get_bugs_set, get_jenkins_job_info, get_jenkins_jobs, \ - get_tickets_dict, get_tickets_set, get_osp_version, \ - get_other_blockers, percent + +from jeeves.common import generate_html_file, percent +from jeeves.jobs import get_jenkins_job_info, get_jenkins_jobs, get_osp_version +from jeeves.blockers import get_bugs_dict, get_bugs_set, get_tickets_dict, get_tickets_set, get_other_blockers def run_report(config, blockers, preamble_file, server, header, test_email, no_email, template_file): diff --git a/requirements.txt b/requirements.txt index 7f92e1e..a26d395 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,3 +5,4 @@ python-bugzilla==3.0.2 jira==2.0.0 pytest==6.2.2 pytest-cov==2.11.1 +flake8==3.8.4 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_blockers.py b/tests/test_blockers.py new file mode 100644 index 0000000..7c34118 --- /dev/null +++ b/tests/test_blockers.py @@ -0,0 +1,54 @@ +from jeeves.blockers import * + + +def test_get_bugs_dict(): + pass + + +def test_get_bugs_set(): + mockers = { + 'job1': {'bz': [0]}, + 'job2': {'bz': [123456]}, + 'job3': {'bz': [123456, 789123]} + } + assert get_bugs_set(mockers) == {123456, 789123} + + +def test_get_tickets_dict(): + pass + + +def test_get_tickets_set(): + mockers = { + 'job1': {'jira': [0]}, + 'job2': {'jira': ['RHOSINFRA-123']}, + 'job3': {'jira': ['RHOSINFRA-123', 'RHOSENTDFG-456']} + } + assert get_tickets_set(mockers) == {'RHOSINFRA-123', 'RHOSENTDFG-456'} + + +def test_get_other_blockers(): + pass + + +def test_has_blockers(): + mockers = { + 'job1': {'bz': [123456]}, + 'job2': {'jira': ['RHOSINFRA-123']}, + 'job3': {'other': {'name': 'this is a test name'}}, + 'job4': {'bz': [0]}, + 'job5': {'jira': [0]}, + 'job6': {'other': [0]}, + 'job7': {'owners': 'foo@bar.com'}, + 'job8': {'owners': 'foo@bar.com', 'bz': [123456], 'jira': ['RHOSINFRA-123']}, + 'job9': {} + } + assert has_blockers(mockers, 'job1') == True + assert has_blockers(mockers, 'job2') == True + assert has_blockers(mockers, 'job3') == True + assert has_blockers(mockers, 'job4') == False + assert has_blockers(mockers, 'job5') == False + assert has_blockers(mockers, 'job6') == False + assert has_blockers(mockers, 'job7') == False + assert has_blockers(mockers, 'job8') == True + assert has_blockers(mockers, 'job9') == False diff --git a/tests/test_common.py b/tests/test_common.py new file mode 100644 index 0000000..3c10736 --- /dev/null +++ b/tests/test_common.py @@ -0,0 +1,19 @@ +from jeeves.common import * + + +def test_generate_header(): + pass + + +def test_generate_html_file(): + pass + + +def test_percent_func(): + assert percent(0, 1) == 0.0 + assert percent(1, 2) == 50.0 + 
assert percent(1, 1) == 100.0 + + +def test_validate_config(): + pass diff --git a/test_functions.py b/tests/test_jobs.py old mode 100755 new mode 100644 similarity index 54% rename from test_functions.py rename to tests/test_jobs.py index a122c6e..9dda620 --- a/test_functions.py +++ b/tests/test_jobs.py @@ -1,25 +1,4 @@ -from functions import * - - -def test_generate_header(): - pass - - -def test_generate_html_file(): - pass - - -def get_bugs_dict(): - pass - - -def test_get_bugs_set(): - mockers = { - 'job1': {'bz': [0]}, - 'job2': {'bz': [123456]}, - 'job3': {'bz': [123456, 789123]} - } - assert get_bugs_set(mockers) == {123456, 789123} +from jeeves.jobs import * def test_get_jenkins_job_info(): @@ -30,19 +9,6 @@ def test_get_jenkins_jobs(): pass -def test_get_tickets_dict(): - pass - - -def test_get_tickets_set(): - mockers = { - 'job1': {'jira': [0]}, - 'job2': {'jira': ['RHOSINFRA-123']}, - 'job3': {'jira': ['RHOSINFRA-123', 'RHOSENTDFG-456']} - } - assert get_tickets_set(mockers) == {'RHOSINFRA-123', 'RHOSENTDFG-456'} - - def test_get_osp_version(): assert get_osp_version('DFG-all-unified-16_director-rhel-virthost-3cont_2comp_3ceph-ipv4-geneve-ceph-native-default') == '16' assert get_osp_version('DFG-backup-restore-overcloud-OSP-16-3cont_2comp_3ceph-ipv4-monolithic-broken-node') == '16' @@ -59,40 +25,3 @@ def test_get_osp_version(): assert get_osp_version('DFG-security-keystone-16_director-rhel-virthost-1cont_1comp-ipv4-geneve-lvm-containers') == '16' assert get_osp_version('DFG-upgrades-updates-16-from-passed_phase1-HA-ipv4') == '16' assert get_osp_version('DFG-all-unified-weekly-multijob') is None - - -def test_get_other_blockers(): - pass - - -def test_has_blockers(): - mockers = { - 'job1': {'bz': [123456]}, - 'job2': {'jira': ['RHOSINFRA-123']}, - 'job3': {'other': {'name': 'this is a test name'}}, - 'job4': {'bz': [0]}, - 'job5': {'jira': [0]}, - 'job6': {'other': [0]}, - 'job7': {'owners': 'foo@bar.com'}, - 'job8': {'owners': 'foo@bar.com', 'bz': [123456], 'jira': ['RHOSINFRA-123']}, - 'job9': {} - } - assert has_blockers(mockers, 'job1') == True - assert has_blockers(mockers, 'job2') == True - assert has_blockers(mockers, 'job3') == True - assert has_blockers(mockers, 'job4') == False - assert has_blockers(mockers, 'job5') == False - assert has_blockers(mockers, 'job6') == False - assert has_blockers(mockers, 'job7') == False - assert has_blockers(mockers, 'job8') == True - assert has_blockers(mockers, 'job9') == False - - -def test_percent_func(): - assert percent(0, 1) == 0.0 - assert percent(1, 2) == 50.0 - assert percent(1, 1) == 100.0 - - -def test_validate_config(): - pass diff --git a/tests/test_remind.py b/tests/test_remind.py new file mode 100644 index 0000000..27ef611 --- /dev/null +++ b/tests/test_remind.py @@ -0,0 +1,5 @@ +from jeeves.remind import * + + +def test_run_remind(): + pass diff --git a/tests/test_report.py b/tests/test_report.py new file mode 100644 index 0000000..a7fbddb --- /dev/null +++ b/tests/test_report.py @@ -0,0 +1,5 @@ +from jeeves.report import * + + +def test_run_report(): + pass
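
Several of the new test files above are deliberately left as stubs. As a minimal sketch of how the stubbed test_get_other_blockers in tests/test_blockers.py could be filled in, based only on the blocker format already used in this patch (the job names and URL below are illustrative placeholders, not real data):

from jeeves.blockers import get_other_blockers


def test_get_other_blockers():
    # 'other' blockers carry an optional name and url; missing fields fall back to defaults
    mockers = {
        'job1': {'other': [{'name': 'dependency outage', 'url': 'http://example.com/outage'}]},
        'job2': {'other': [{'name': 'unlinked blocker'}]},
        'job3': {'bz': [123456]}
    }
    assert get_other_blockers(mockers, 'job1') == [{'other_name': 'dependency outage', 'other_url': 'http://example.com/outage'}]
    assert get_other_blockers(mockers, 'job2') == [{'other_name': 'unlinked blocker', 'other_url': None}]
    # a job with no 'other' field yields an empty list
    assert get_other_blockers(mockers, 'job3') == []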
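
Similarly, a sketch of how the stubbed test_validate_config in tests/test_common.py might be exercised with pytest.raises; the config values below are hypothetical placeholders:

import pytest

from jeeves.common import validate_config


def test_validate_config():
    # minimal config covering the fields required when email is disabled
    config = {
        'jenkins_url': 'http://example.com/jenkins',
        'job_search_fields': 'DFG-all',
        'bz_url': 'http://example.com/bugzilla',
        'jira_url': 'http://example.com/jira',
        'certificate': 'ca.crt'
    }
    # all required fields present, so validate_config returns None
    assert validate_config(config, no_email=True) is None
    # with email enabled, the missing email_* and smtp_host fields should raise
    with pytest.raises(Exception):
        validate_config(config, no_email=False)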