diff --git a/features/environment.py b/features/environment.py
index 9701b120..81c6796b 100644
--- a/features/environment.py
+++ b/features/environment.py
@@ -45,6 +45,10 @@ def before_feature(context, feature):
         context.protocol_errors.append(error)
 
     context.gherkin_outcomes = []
+
+    # display the correct scenario and instance related to the gherkin outcome in the behave console & ci/cd report
+    context.scenario_outcome_state = {}
+    context.instance_outcome_state = {}
 
 
 def before_scenario(context, scenario):
@@ -64,6 +68,9 @@ def after_scenario(context, scenario):
     context._pop()
     # preserve the outcomes to be serialized to DB in after_feature()
     context.gherkin_outcomes = old_outcomes
+    context.scenario_outcome_state[len(context.gherkin_outcomes)] = {'scenario': scenario.name,
+                                                                     'last_step': scenario.steps[-1]}
+
 
 
 def after_feature(context, feature):
@@ -119,6 +126,11 @@ def get_or_create_instance_when_set(spf_id):
     else: # invoked via console or CI/CD pipeline
         outcomes = [outcome.to_dict() for outcome in context.gherkin_outcomes]
+        for idx, outcome in enumerate(outcomes):
+            sls = find_scenario_for_outcome(context, idx + 1)
+            outcome['scenario'] = sls['scenario']
+            outcome['last_step'] = sls['last_step'].name
+            outcome['instance_id'] = context.instance_outcome_state.get(idx+1, '')
         outcomes_json_str = json.dumps(outcomes)
         # encodes to utf-8
         outcomes_bytes = outcomes_json_str.encode("utf-8")
         for formatter in filter(lambda f: hasattr(f, "embedding"), context._runner.formatters):
@@ -126,4 +138,12 @@ def get_or_create_instance_when_set(spf_id):
 
             # embed protocol errors
             protocol_errors_bytes = json.dumps(context.protocol_errors).encode("utf-8")
-            formatter.embedding(mime_type="application/json", data=protocol_errors_bytes, target='feature', attribute_name='protocol_errors')
\ No newline at end of file
+            formatter.embedding(mime_type="application/json", data=protocol_errors_bytes, target='feature', attribute_name='protocol_errors')
+
+
+def find_scenario_for_outcome(context, outcome_index):
+    previous_count = 0
+    for count, scenario in context.scenario_outcome_state.items():
+        if previous_count < outcome_index <= count:
+            return scenario
+        previous_count = count
\ No newline at end of file
diff --git a/features/steps/validation_handling.py b/features/steps/validation_handling.py
index 2803bef8..5cc624dc 100644
--- a/features/steps/validation_handling.py
+++ b/features/steps/validation_handling.py
@@ -232,6 +232,7 @@ def apply_then_operation(fn, inst, context, current_path, depth=0, **kwargs):
     displayed_inst_override_trigger = "and display entity instance"
     displayed_inst_override = displayed_inst_override_trigger in context.step.name.lower()
     inst_to_display = inst if displayed_inst_override else activation_inst
+    instance_id = safe_method_call(inst_to_display, 'id', None)
 
     validation_outcome = ValidationOutcome(
         outcome_code=get_outcome_code(result, context),
@@ -240,7 +241,7 @@ def apply_then_operation(fn, inst, context, current_path, depth=0, **kwargs):
         feature=context.feature.name,
         feature_version=misc.define_feature_version(context),
         severity=OutcomeSeverity.WARNING if any(tag.lower() == "industry-practice" for tag in context.feature.tags) else OutcomeSeverity.ERROR,
-        instance_id=safe_method_call(inst_to_display, 'id', None),
+        instance_id=instance_id,
         validation_task_id=context.validation_task_id
     )
     # suppress the 'display_entity' trigger text if it is used as part of the expected value
@@ -250,6 +251,7 @@ def apply_then_operation(fn, inst, context, current_path, depth=0, **kwargs):
                                    else
                                    validation_outcome.expected)
     context.gherkin_outcomes.append(validation_outcome)
+    context.instance_outcome_state[len(context.gherkin_outcomes)] = instance_id
 
     # Currently, we should not inject passed outcomes for each individual instance to the database
     # if not step_results:
diff --git a/main.py b/main.py
index 8164db7d..4280f167 100644
--- a/main.py
+++ b/main.py
@@ -134,11 +134,7 @@ def run(filename, rule_type=RuleType.ALL, with_console_output=False, execution_m
                 'protocol_errors': protocol_errors,
             }
             scenario_validation_outcomes = json.loads(base64.b64decode(el.get('validation_outcomes', [{}])[0].get('data', '')).decode('utf-8')) if el.get('validation_outcomes') else []
-            scenario_info = {
-                'scenario_name': el['name'],
-                'step_names': [step['name'] for step in el['steps']]
-            }
             for validation_outcome in scenario_validation_outcomes:
-                yield validation_outcome | scenario_info
+                yield validation_outcome
     os.close(fd)
     os.unlink(jsonfn)
diff --git a/test/test_main.py b/test/test_main.py
index f05ff59c..b194df7a 100644
--- a/test/test_main.py
+++ b/test/test_main.py
@@ -98,8 +98,8 @@ def test_invocation(filename):
     tabulate_results = [
         (
             f"{outcome.get('feature')} - v{outcome.get('feature_version')}",  # Feature
-            outcome.get('scenario_name'),  # Scenario
-            outcome.get('step_names')[-1],  # Last Step
+            outcome.get('scenario'),  # Scenario
+            outcome.get('last_step'),  # Last Step
            outcome.get('instance_id'),  # Instance
            f"Expected : {outcome.get('expected')}, Observed : {outcome.get('observed')}",  # Message
            outcome.get('outcome_code')  # Code
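
For reference, the bookkeeping introduced above keys context.scenario_outcome_state on the running total of gherkin outcomes recorded after each scenario, so find_scenario_for_outcome can map a 1-based outcome index back to the scenario (and last step) that produced it. A minimal standalone sketch of that lookup, using plain strings in place of behave Step objects and a SimpleNamespace in place of the behave context (both stand-ins are assumptions for illustration only):

from types import SimpleNamespace

def find_scenario_for_outcome(context, outcome_index):
    # same range lookup as in the patch: the first cumulative count that
    # reaches outcome_index identifies the scenario that produced it
    previous_count = 0
    for count, scenario in context.scenario_outcome_state.items():
        if previous_count < outcome_index <= count:
            return scenario
        previous_count = count

# hypothetical state: scenario A recorded outcomes 1-2, scenario B recorded outcome 3
context = SimpleNamespace(scenario_outcome_state={
    2: {'scenario': 'Scenario A', 'last_step': 'Then A holds'},
    3: {'scenario': 'Scenario B', 'last_step': 'Then B holds'},
})

assert find_scenario_for_outcome(context, 1)['scenario'] == 'Scenario A'
assert find_scenario_for_outcome(context, 3)['scenario'] == 'Scenario B'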