update test regression #3337

Merged · 4 commits · Oct 10, 2024
Changes from all commits
103 changes: 88 additions & 15 deletions tests/conftest.py
@@ -2,6 +2,7 @@
import pathlib
import json
import pytest
+import re

try:
    os.environ['CMR_USER']
@@ -54,7 +55,7 @@ def pytest_generate_tests(metafunc):

    association_dir = 'uat' if metafunc.config.option.env == 'uat' else 'ops'
    associations = os.listdir(cmr_dirpath.joinpath(association_dir))

    if 'collection_concept_id' in metafunc.fixturenames and associations is not None:
        metafunc.parametrize("collection_concept_id", associations)
    else:
@@ -69,27 +70,101 @@ def log_global_env_facts(record_testsuite_property, request):
    record_testsuite_property("env", request.config.getoption('env'))


+def get_error_message(report):
+
+    # If it's a regular test failure (not a skipped or xfailed test)
+    if hasattr(report, 'longreprtext'):
+        # Extract the failure text (available in pytest >= 6)
+        error_message = report.longreprtext
+    else:
+        # Fallback if longreprtext is not available
+        if isinstance(report.longrepr, tuple):
+            error_message = report.longrepr[2]
+        else:
+            error_message = str(report.longrepr)
+
+    exception_pattern = r"E\s+(\w+):\s+\(([^,]+),\s+'(.+?)'\)"
+    match = re.search(exception_pattern, error_message)
+
+    if match:
+        exception_type = match.group(1)     # e.g. 'Exception'
+        exception_reason = match.group(2)   # e.g. 'Not Found'
+        exception_message = match.group(3)  # e.g. "Error: EULA ... could not be found."
+
+        # Combine everything into one message
+        full_message = f"Exception Type: {exception_type}, Reason: {exception_reason}, Message: {exception_message}"
+        return full_message
+    else:
+        return "No exception found."


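Not part of the diff: a quick, hypothetical illustration of what the pattern in get_error_message extracts from a pytest failure report; the sample line is made up.

import re

exception_pattern = r"E\s+(\w+):\s+\(([^,]+),\s+'(.+?)'\)"
sample = "E       Exception: (Not Found, 'Error: EULA could not be found.')"  # hypothetical report line
match = re.search(exception_pattern, sample)
if match:
    # Prints: Exception / Not Found / Error: EULA could not be found.
    print(match.group(1), match.group(2), match.group(3), sep=" / ")
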
def pytest_terminal_summary(terminalreporter, exitstatus, config):

    filtered_success, success, skipped, failed = [], [], [], []

    test_results = {'success': filtered_success, 'failed': failed, 'skipped': skipped}

-    # the fourth keyword is the collection concept id; may change if we change the test inputs
-    skipped.extend([list(skip.keywords)[3] for skip in terminalreporter.stats.get('skipped', [])])
-    failed.extend([list(failed.keywords)[3] for failed in terminalreporter.stats.get('failed', [])])
-    success.extend([list(passed.keywords)[3] for passed in terminalreporter.stats.get('passed', [])])
-
-    # We have temporal and spatial tests; if either one failed, don't count the collection as a success
-    # Convert lists to sets
-    fail_set = set(failed)
-    success_set = set(success)
-
-    # Remove elements from success that are in fail
-    set_filtered_success = success_set - fail_set
-
-    # Convert the set back to a list if needed
-    filtered_success.extend(list(set_filtered_success))
+    failed_tests = terminalreporter.stats.get('failed', [])
+    skipped_tests = terminalreporter.stats.get('skipped', [])
+    success_tests = terminalreporter.stats.get('passed', [])
+
+    if failed_tests:
+        for report in failed_tests:
+
+            # The fourth keyword is the collection concept id; this may change if the test inputs change
+            concept_id = list(report.keywords)[3]
+
+            # Extract the test name and derive the test type from the report
+            test_name = report.nodeid
+            test_type = None
+
+            if "spatial" in test_name:
+                test_type = "spatial"
+            elif "temporal" in test_name:
+                test_type = "temporal"
+
+            full_message = get_error_message(report)
+
+            failed.append({
+                "concept_id": concept_id,
+                "test_type": test_type,
+                "message": full_message
+            })
+
+    if skipped_tests:
+        for report in skipped_tests:
+
+            concept_id = list(report.keywords)[3]
+
+            # Extract the test name and derive the test type from the report
+            test_name = report.nodeid
+            test_type = None
+
+            if "spatial" in test_name:
+                test_type = "spatial"
+            elif "temporal" in test_name:
+                test_type = "temporal"
+
+            # If it's a regular test failure (not a skipped or xfailed test)
+            if hasattr(report, 'longreprtext'):
+                # Extract the failure text (available in pytest >= 6)
+                error_message = report.longreprtext
+            else:
+                # Fallback if longreprtext is not available
+                if isinstance(report.longrepr, tuple):
+                    error_message = report.longrepr[2]
+                else:
+                    error_message = str(report.longrepr)
+
+            # For skips, longreprtext is the string form of a (path, lineno, reason) tuple
+            error = "UNKNOWN"
+            if isinstance(report.longreprtext, str):
+                tuple_error = eval(report.longreprtext)
+                error = tuple_error[2]
+
+            skipped.append({
+                "concept_id": concept_id,
+                "test_type": test_type,
+                "message": error
+            })

    env = config.option.env

@@ -108,5 +183,3 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
        json.dump(tests, file)




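Not part of the diff: based on the failed.append(...) and skipped.append(...) calls above, each record written to the JSON report should look roughly like this sketch; the concept id and message are illustrative.

example_failed_record = {
    "concept_id": "C0000000000-PROVIDER",  # hypothetical concept id
    "test_type": "spatial",                # "spatial", "temporal", or None
    "message": "Exception Type: Exception, Reason: Not Found, Message: ..."
}
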
26 changes: 15 additions & 11 deletions tests/create_or_update_issue.py
@@ -168,7 +168,6 @@ def update_issue(repo_name, issue_number, issue_body, github_token):

print(f"Issue updated successfully: {response.json()['html_url']}")


def create_or_update_issue(repo_name, github_token, env):

upper_env = env.upper()
@@ -185,41 +184,46 @@ def create_or_update_issue(repo_name, github_token, env):

    failed = results.get('failed', [])
    skipped = results.get('skipped', [])

+    failed_concept_ids = [collection.get('concept_id') for collection in failed]
+    skipped_concept_ids = [collection.get('concept_id') for collection in skipped]

    no_associations = []
    failed_test = []

-    for collection in failed:
-        if collection not in current_associations:
-            no_associations.append(collection)
+    for collection_concept_id in failed_concept_ids:
+        if collection_concept_id not in current_associations:
+            no_associations.append(collection_concept_id)
        else:
-            failed_test.append(collection)
+            failed_test.append(collection_concept_id)

    providers = []
    issue_body = None

-    all_collections = failed + skipped
+    all_collections = failed_concept_ids + skipped_concept_ids

    if len(failed) > 0 or len(skipped) > 0:

-        for collection in failed:
+        for collection in failed_concept_ids:
            provider = collection.split('-')[1]
            if provider not in providers:
                providers.append(provider)
-        for collection in skipped:
+        for collection in skipped_concept_ids:
            provider = collection.split('-')[1]
            if provider not in providers:
                providers.append(provider)

        collection_names = get_collection_names(providers, env, all_collections)
        issue_body = datetime.now().strftime("Updated on %m-%d-%Y\n")

+        print(collection_names)

        if len(failed_test) > 0:
            issue_body += "\n FAILED: \n"
-            issue_body += "\n".join(f"{cid} ({collection_names.get(cid, '')})" for cid in failed_test)
+            issue_body += "\n".join(f"{cid.get('concept_id')} ({collection_names.get(cid.get('concept_id'), '')}) - {cid.get('test_type')} test - {cid.get('message')}" for cid in failed)
        if len(skipped) > 0:
            issue_body += "\n SKIPPED: \n"
-            issue_body += "\n".join(f"{cid} ({collection_names.get(cid, '')})" for cid in skipped)
+            issue_body += "\n".join(f"{cid.get('concept_id')} ({collection_names.get(cid.get('concept_id'), '')}) - {cid.get('test_type')} test - {cid.get('message')}" for cid in skipped)
        if len(no_associations) > 0:
            issue_body += "\n NO ASSOCIATIONS: \n"
            issue_body += "\n".join(f"{cid} ({collection_names.get(cid, '')})" for cid in no_associations)
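Not part of the diff: the provider loops above assume CMR concept ids of the form <id>-<PROVIDER>, so split('-')[1] yields the provider; a hypothetical example:

concept_id = "C1234567890-PODAAC"  # hypothetical concept id
provider = concept_id.split('-')[1]
print(provider)  # PODAAC

With the new f-strings, a failed entry then renders in the issue body roughly as "C1234567890-PODAAC (Collection Name) - spatial test - Exception Type: ..." (values illustrative).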
8 changes: 4 additions & 4 deletions tests/verify_collection.py
@@ -130,7 +130,7 @@ def granule_json(collection_concept_id: str, cmr_mode: str, bearer_token: str, r
    if 'items' in response_json and len(response_json['items']) > 0:
        return response_json['items'][0]
    elif cmr_mode == cmr.CMR_UAT:
-        pytest.skip(f"No granules found for UAT collection {collection_concept_id}. CMR search used was {cmr_url}")
+        pytest.fail(f"No granules found for UAT collection {collection_concept_id}. CMR search used was {cmr_url}")
    elif cmr_mode == cmr.CMR_OPS:
        pytest.fail(f"No granules found for OPS collection {collection_concept_id}. CMR search used was {cmr_url}")

@@ -155,7 +155,7 @@ def download_file(url):
    if granule_url:
        return download_file(granule_url)
    else:
-        pytest.skip(f"Unable to find download URL for {granule_json['meta']['concept-id']}")
+        pytest.fail(f"Unable to find download URL for {granule_json['meta']['concept-id']}")


@pytest.fixture(scope="function")
@@ -167,8 +167,8 @@ def collection_variables(cmr_mode, collection_concept_id, env, bearer_token):
    collection_associations = collection_res.get("associations")
    variable_concept_ids = collection_associations.get("variables")

-    if variable_concept_ids is None and env == 'uat':
-        pytest.skip('There are no umm-v associated with this collection in UAT')
+    if variable_concept_ids is None:
+        pytest.fail(f'There are no umm-v associated with this collection in {env}')

    variables = []
    for i in range(0, len(variable_concept_ids), 40):
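Not part of the diff: the net effect of these three changes is that missing granules, missing download URLs, and missing UMM-V associations are now reported as failures rather than skips, so they land in the FAILED section of the generated issue. A minimal sketch of the difference in pytest:

import pytest

def test_skipped_case():
    pytest.skip("recorded under 'skipped'; does not fail the run")

def test_failed_case():
    pytest.fail("recorded under 'failed'; fails the run and is reported")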