From 3d2b8cd172934bc33762d0ddc7d772324b48468b Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Wed, 21 Aug 2024 02:27:35 -0700
Subject: [PATCH 01/18] adding mode, type, status, and, potentially, category

---
 .../detection_testing/views/DetectionTestingView.py | 2 ++
 .../detection_abstract.py                           | 7 +++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py
index 9130fca6..aac6fbda 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingView.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py
@@ -155,7 +155,9 @@ def getSummaryObject(
         # TODO (#230): expand testing metrics reported
         # Construct and return the larger results dict
         result_dict = {
+            # TODO (cmcginley): differentiate total detections vs total tested
             "summary": {
+                "mode": self.config.getModeName().lower(),
                 "success": overall_success,
                 "total_detections": total_detections,
                 "total_pass": total_pass,

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 99e33b12..73748570 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -701,10 +701,13 @@ def tests_validate(
         # Ensure that there is at least 1 test
         if len(v) == 0:
             if info.data.get("tags", None) and info.data.get("tags").manual_test is not None:  # type: ignore
-                # Detections that are manual_test MAY have detections, but it is not required. If they
+                # Detections that are manual_test MAY have tests, but it is not required. If they
                 # do not have one, then create one which will be a placeholder.
                 # Note that this fake UnitTest (and by extension, Integration Test) will NOT be generated
                 # if there ARE test(s) defined for a Detection.
+
+                # TODO (cmcginley): make this ManualTest; ultimately this test case should be
+                # explicit and not implicit
                 placeholder_test = UnitTest(  # type: ignore
                     name="PLACEHOLDER FOR DETECTION TAGGED MANUAL_TEST WITH NO TESTS SPECIFIED IN YML FILE",
                     attack_data=[]
                 )
@@ -753,7 +756,7 @@ def all_tests_successful(self) -> bool:

     def get_summary(
         self,
-        detection_fields: list[str] = ["name", "search"],
+        detection_fields: list[str] = ["name", "type", "status", "source", "search"],
         test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
         test_job_fields: list[str] = ["resultCount", "runDuration"],
     ) -> dict[str, Any]:

From d53525edfa5f9f323807b61ddbbd482e691ebda8 Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Wed, 21 Aug 2024 16:41:00 -0700
Subject: [PATCH 02/18] added ManualTest; added detection level status; added
 field aliases for summary

---
 .../DetectionTestingInfrastructure.py         |   8 +-
 .../views/DetectionTestingViewFile.py         |   6 +-
 .../detection_abstract.py                     | 143 +++++++++++++-----
 contentctl/objects/base_test.py               |   1 +
 contentctl/objects/integration_test.py        |   8 +-
 contentctl/objects/integration_test_result.py |   5 +-
 contentctl/objects/manual_test.py             |  32 ++++
 contentctl/objects/manual_test_result.py      |   8 +
 ...est_attack_data.py => test_attack_data.py} |   4 +-
 contentctl/objects/test_group.py              |   4 +-
 contentctl/objects/unit_test.py               |  13 +-
 11 files changed, 170 insertions(+), 62 deletions(-)
 create mode 100644 contentctl/objects/manual_test.py
 create mode 100644 contentctl/objects/manual_test_result.py
 rename contentctl/objects/{unit_test_attack_data.py => test_attack_data.py} (86%)

diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
index ff4dcaa1..8f92b751 100644
--- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
+++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
@@ -29,7 +29,7 @@
 from contentctl.objects.base_test import BaseTest
 from contentctl.objects.unit_test import UnitTest
 from contentctl.objects.integration_test import IntegrationTest
-from contentctl.objects.unit_test_attack_data import UnitTestAttackData
+from contentctl.objects.test_attack_data import TestAttackData
 from contentctl.objects.unit_test_result import UnitTestResult
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.objects.test_group import TestGroup
@@ -1143,7 +1143,7 @@ def retry_search_until_timeout(

         return

-    def delete_attack_data(self, attack_data_files: list[UnitTestAttackData]):
+    def delete_attack_data(self, attack_data_files: list[TestAttackData]):
         for attack_data_file in attack_data_files:
             index = attack_data_file.custom_index or self.sync_obj.replay_index
             host = attack_data_file.host or self.sync_obj.replay_host
@@ -1176,7 +1176,7 @@ def replay_attack_data_files(

     def replay_attack_data_file(
         self,
-        attack_data_file: UnitTestAttackData,
+        attack_data_file: TestAttackData,
         tmp_dir: str,
         test_group: TestGroup,
         test_group_start_time: float,
@@ -1244,7 +1244,7 @@ def replay_attack_data_file(
     def hec_raw_replay(
         self,
         tempfile: str,
-        attack_data_file: UnitTestAttackData,
+        attack_data_file: TestAttackData,
         verify_ssl: bool = False,
     ):
         if verify_ssl is False:

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
index 4b31bca7..d8c15e02 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
@@ -13,7 +13,6 @@ class DetectionTestingViewFile(DetectionTestingView):
     output_filename: str = OUTPUT_FILENAME

     def getOutputFilePath(self) -> pathlib.Path:
-
         folder_path = pathlib.Path('.') / self.output_folder
         output_file = folder_path / self.output_filename
@@ -27,10 +26,9 @@ def stop(self):
         output_file = self.getOutputFilePath()

         folder_path.mkdir(parents=True, exist_ok=True)
-
-
+
         result_dict = self.getSummaryObject()
-
+
         # use the yaml writer class
         with open(output_file, "w") as res:
             res.write(yaml.safe_dump(result_dict,sort_keys=False))

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 73748570..3620b0e0 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -1,5 +1,5 @@
 from __future__ import annotations
-from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated
+from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated, Self
 import re
 import pathlib
 from pydantic import (
@@ -27,9 +27,11 @@
 from contentctl.objects.detection_tags import DetectionTags
 from contentctl.objects.deployment import Deployment
 from contentctl.objects.unit_test import UnitTest
+from contentctl.objects.manual_test import ManualTest
 from contentctl.objects.test_group import TestGroup
 from contentctl.objects.integration_test import IntegrationTest
 from contentctl.objects.data_source import DataSource
+from contentctl.objects.base_test_result import TestResultStatus

 # from contentctl.objects.playbook import Playbook
 from contentctl.objects.enums import ProvidingTechnology
@@ -57,7 +59,7 @@ class Detection_Abstract(SecurityContentObject):
     # default mode, 'smart'
     # https://docs.pydantic.dev/latest/concepts/unions/#left-to-right-mode
     # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541
-    tests: List[Annotated[Union[UnitTest, IntegrationTest], Field(union_mode='left_to_right')]] = []
+    tests: List[Annotated[Union[UnitTest, IntegrationTest, ManualTest], Field(union_mode='left_to_right')]] = []

     # A list of groups of tests, relying on the same data
     test_groups: Union[list[TestGroup], None] = Field(None, validate_default=True)
@@ -122,35 +124,58 @@ def validate_presence_of_filter_macro(

         return value

-    @field_validator("test_groups")
-    @classmethod
-    def validate_test_groups(
-        cls,
-        value: Union[None, List[TestGroup]],
-        info: ValidationInfo
-    ) -> Union[List[TestGroup], None]:
+    @model_validator(mode="after")
+    def build_test_groups(self) -> Self:
         """
-        Validates the `test_groups` field and constructs the model from the list of unit tests
+        Builds the `test_groups` field, constructing the model from the list of unit tests
         if no explicit construct was provided
         :param value: the value of the field `test_groups`
         :param values: a dict of the other fields in the Detection model
         """
         # if the value was not the None default, do nothing
-        if value is not None:
-            return value
+        if self.test_groups is not None:
+            return self

-        # iterate over the unit tests and create a TestGroup (and as a result, an IntegrationTest) for each
-        test_groups: list[TestGroup] = []
-        tests: list[UnitTest | IntegrationTest] = info.data.get("tests")  # type: ignore
-        unit_test: UnitTest
-        for unit_test in tests:  # type: ignore
-            test_group = TestGroup.derive_from_unit_test(unit_test, info.data.get("name"))  # type: ignore
-            test_groups.append(test_group)
+        # iterate over the tests and create a TestGroup (and as a result, an IntegrationTest) for
+        # each unit test
+        self.test_groups = []
+        for test in self.tests:
+            # We only derive TestGroups from UnitTests (ManualTest is ignored and IntegrationTests
+            # have not been created yet)
+            if isinstance(test, UnitTest):
+                test_group = TestGroup.derive_from_unit_test(test, self.name)
+                self.test_groups.append(test_group)

         # now add each integration test to the list of tests
-        for test_group in test_groups:
-            tests.append(test_group.integration_test)
-        return test_groups
+        for test_group in self.test_groups:
+            self.tests.append(test_group.integration_test)
+
+        return self
+
+    @property
+    def test_status(self) -> TestResultStatus | None:
+        """
+        Returns the collective status of the detection's tests. If any test status has yet to be set,
+        None is returned. If any test failed or errored, FAIL is returned. If all tests were skipped,
+        SKIP is returned. If at least one test passed and the rest passed or skipped, PASS is returned.
+        """
+        passed = 0
+        skipped = 0
+        for test in self.tests:
+            if test.result is None or test.result.status is None:
+                return None
+            elif test.result.status == TestResultStatus.ERROR or test.result.status == TestResultStatus.FAIL:
+                return TestResultStatus.FAIL
+            elif test.result.status == TestResultStatus.SKIP:
+                skipped += 1
+            elif test.result.status == TestResultStatus.PASS:
+                passed += 1
+        if passed > 0:
+            return TestResultStatus.PASS
+        elif skipped == len(self.tests):
+            return TestResultStatus.SKIP
+
+        raise ValueError(f"Undefined test status for detection: {self.name}")

     @computed_field
     @property
@@ -443,6 +468,8 @@ def model_post_init(self, __context: Any) -> None:

         self.cve_enrichment_func(__context)

+        self.skip_manual_tests()
+
     @field_validator('lookups', mode="before")
     @classmethod
     def getDetectionLookups(cls, v: list[str], info: ValidationInfo) -> list[Lookup]:
@@ -672,12 +699,6 @@ def ensurePresenceOfRequiredTests(self):
         if self.type in set([AnalyticsType.Correlation.value]):
             return self

-        if self.tags.manual_test is not None:
-            for test in self.tests:
-                test.skip(
-                    f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: '{self.tags.manual_test}'"
-                )
-
         if len(self.tests) == 0:
             raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")

@@ -686,9 +707,9 @@ def ensurePresenceOfRequiredTests(self):
     @field_validator("tests")
     def tests_validate(
         cls,
-        v: list[UnitTest | IntegrationTest],
+        v: list[UnitTest | IntegrationTest | ManualTest],
         info: ValidationInfo
-    ) -> list[UnitTest | IntegrationTest]:
+    ) -> list[UnitTest | IntegrationTest | ManualTest]:
         # Only production analytics require tests
         if info.data.get("status", "") != DetectionStatus.production.value:
             return v
@@ -698,17 +719,39 @@ def tests_validate(
         if info.data.get("type", "") in set([AnalyticsType.Correlation.value]):
             return v

+        # Pull the tags and a flag indicating if this is a manual_test detection or not
+        tags: DetectionTags | None = info.data.get("tags", None)
+
+        # Since ManualTest and UnitTest are not differentiable without looking at the manual_test
+        # tag, Pydantic builds all tests as UnitTest objects. If we see the manual_test flag, we
+        # convert these to ManualTest
+        tmp: list[UnitTest | IntegrationTest | ManualTest] = []
+        if tags is not None and tags.manual_test is not None:
+            for test in v:
+                if not isinstance(test, UnitTest):
+                    raise ValueError(
+                        "At this point of initialization, tests should only be UnitTest objects, "
+                        f"but encountered a {type(test)}."
+                    )
+                # Create the manual test and skip it upon creation (cannot test via contentctl)
+                manual_test = ManualTest(
+                    name=test.name,
+                    attack_data=test.attack_data
+                )
+                tmp.append(manual_test)
+            v = tmp
+
         # Ensure that there is at least 1 test
         if len(v) == 0:
-            if info.data.get("tags", None) and info.data.get("tags").manual_test is not None:  # type: ignore
+            if tags is not None and tags.manual_test is not None:
                 # Detections that are manual_test MAY have tests, but it is not required. If they
                 # do not have one, then create one which will be a placeholder.
-                # Note that this fake UnitTest (and by extension, Integration Test) will NOT be generated
-                # if there ARE test(s) defined for a Detection.
+                # Note that this fake ManualTest will NOT be generated if there ARE test(s) defined
+                # for a Detection.

                 # TODO (cmcginley): make this ManualTest; ultimately this test case should be
                 # explicit and not implicit
-                placeholder_test = UnitTest(  # type: ignore
+                placeholder_test = ManualTest(  # type: ignore
                     name="PLACEHOLDER FOR DETECTION TAGGED MANUAL_TEST WITH NO TESTS SPECIFIED IN YML FILE",
                     attack_data=[]
                 )
@@ -722,6 +765,27 @@ def tests_validate(
         # No issues - at least one test provided for production type requiring testing
         return v

+    def skip_manual_tests(self) -> None:
+        """
+        Skips all ManualTests, if the manual_test flag is set; also raises an error if any other
+        test types are found for a manual_test detection
+        """
+        # Skip all ManualTest
+        if self.tags.manual_test is not None:
+            for test in self.tests:
+                if isinstance(test, ManualTest):
+                    test.skip(
+                        message=(
+                            "TEST SKIPPED (MANUAL): Detection marked as 'manual_test' with "
+                            f"explanation: {self.tags.manual_test}"
+                        )
+                    )
+                else:
+                    raise ValueError(
+                        "A detection with the manual_test flag should only have tests of type "
+                        "ManualTest"
+                    )
+
     def all_tests_successful(self) -> bool:
         """
         Checks that all tests in the detection succeeded. If no tests are defined, consider that a
@@ -756,7 +820,10 @@ def all_tests_successful(self) -> bool:

     def get_summary(
         self,
-        detection_fields: list[str] = ["name", "type", "status", "source", "search"],
+        detection_fields: list[str] = ["name", "type", "status", "test_status", "source", "data_source", "search"],
+        detection_field_aliases: dict[str, str] = {
+            "status": "production_status", "test_status": "status", "source": "source_category"
+        },
         test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
         test_job_fields: list[str] = ["resultCount", "runDuration"],
     ) -> dict[str, Any]:
@@ -772,7 +839,11 @@ def get_summary(

         # Grab the top level detection fields
         for field in detection_fields:
-            summary_dict[field] = getattr(self, field)
+            value = getattr(self, field)
+            if field in detection_field_aliases:
+                summary_dict[detection_field_aliases[field]] = value
+            else:
+                summary_dict[field] = value

         # Set success based on whether all tests passed
         summary_dict["success"] = self.all_tests_successful()

diff --git a/contentctl/objects/base_test.py b/contentctl/objects/base_test.py
index cef641f0..20e681cf 100644
--- a/contentctl/objects/base_test.py
+++ b/contentctl/objects/base_test.py
@@ -13,6 +13,7 @@ class TestType(str, Enum):
     """
     UNIT = "unit"
     INTEGRATION = "integration"
+    MANUAL = "manual"

     def __str__(self) -> str:
         return self.value

diff --git a/contentctl/objects/integration_test.py b/contentctl/objects/integration_test.py
index c7cb5119..4f88be70 100644
--- a/contentctl/objects/integration_test.py
+++ b/contentctl/objects/integration_test.py
@@ -1,5 +1,3 @@
-from typing import Union
-
 from pydantic import Field

 from contentctl.objects.base_test import BaseTest, TestType
@@ -13,10 +11,10 @@ class IntegrationTest(BaseTest):
     An integration test for a detection against ES
     """
     # The test type (integration)
-    test_type: TestType = Field(TestType.INTEGRATION)
+    test_type: TestType = Field(default=TestType.INTEGRATION)

     # The test result
-    result: Union[None, IntegrationTestResult] = None
+    result: IntegrationTestResult | None = None

     @classmethod
     def derive_from_unit_test(cls, unit_test: UnitTest) -> "IntegrationTest":
@@ -36,7 +34,7 @@ def skip(self, message: str) -> None:
         Skip the test by setting its result status
         :param message: the reason for skipping
         """
-        self.result = IntegrationTestResult(
+        self.result = IntegrationTestResult(  # type: ignore
             message=message,
             status=TestResultStatus.SKIP
         )

diff --git a/contentctl/objects/integration_test_result.py b/contentctl/objects/integration_test_result.py
index e746731e..66553def 100644
--- a/contentctl/objects/integration_test_result.py
+++ b/contentctl/objects/integration_test_result.py
@@ -1,7 +1,6 @@
-from typing import Optional
 from contentctl.objects.base_test_result import BaseTestResult

-
+# TODO (cmcginley): this seems unused, can I delete?
 SAVED_SEARCH_TEMPLATE = "{server}:{web_port}/en-US/{path}"

@@ -10,4 +9,4 @@ class IntegrationTestResult(BaseTestResult):
     An integration test result
     """
     # the total time we slept waiting for the detection to fire after activating it
-    wait_duration: Optional[int] = None
+    wait_duration: int | None = None

diff --git a/contentctl/objects/manual_test.py b/contentctl/objects/manual_test.py
new file mode 100644
index 00000000..cc6e71a5
--- /dev/null
+++ b/contentctl/objects/manual_test.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from pydantic import Field
+
+from contentctl.objects.test_attack_data import TestAttackData
+from contentctl.objects.manual_test_result import ManualTestResult
+from contentctl.objects.base_test import BaseTest, TestType
+from contentctl.objects.base_test_result import TestResultStatus
+
+
+class ManualTest(BaseTest):
+    """
+    A manual test for a detection
+    """
+    # The test type (manual)
+    test_type: TestType = Field(default=TestType.MANUAL)
+
+    # The attack data to be ingested for the manual test
+    attack_data: list[TestAttackData]
+
+    # The result of the manual test
+    result: ManualTestResult | None = None
+
+    def skip(self, message: str) -> None:
+        """
+        Skip the test by setting its result status
+        :param message: the reason for skipping
+        """
+        self.result = ManualTestResult(  # type: ignore
+            message=message,
+            status=TestResultStatus.SKIP
+        )

diff --git a/contentctl/objects/manual_test_result.py b/contentctl/objects/manual_test_result.py
new file mode 100644
index 00000000..dd6439cd
--- /dev/null
+++ b/contentctl/objects/manual_test_result.py
@@ -0,0 +1,8 @@
+from contentctl.objects.base_test_result import BaseTestResult
+
+
+class ManualTestResult(BaseTestResult):
+    """
+    A manual test result
+    """
+    pass

diff --git a/contentctl/objects/unit_test_attack_data.py b/contentctl/objects/test_attack_data.py
similarity index 86%
rename from contentctl/objects/unit_test_attack_data.py
rename to contentctl/objects/test_attack_data.py
index 7a4d5d8a..8421199b 100644
--- a/contentctl/objects/unit_test_attack_data.py
+++ b/contentctl/objects/test_attack_data.py
@@ -3,11 +3,11 @@
 from typing import Union, Optional


-class UnitTestAttackData(BaseModel):
+class TestAttackData(BaseModel):
     data: Union[HttpUrl, FilePath] = Field(...)

     # TODO - should source and sourcetype be mapped to a list
     # of supported source and sourcetypes in a given environment?
     source: str = Field(...)
     sourcetype: str = Field(...)
     custom_index: Optional[str] = None
-    host: Optional[str] = None
\ No newline at end of file
+    host: Optional[str] = None

diff --git a/contentctl/objects/test_group.py b/contentctl/objects/test_group.py
index abf71566..6156f68d 100644
--- a/contentctl/objects/test_group.py
+++ b/contentctl/objects/test_group.py
@@ -2,7 +2,7 @@

 from contentctl.objects.unit_test import UnitTest
 from contentctl.objects.integration_test import IntegrationTest
-from contentctl.objects.unit_test_attack_data import UnitTestAttackData
+from contentctl.objects.test_attack_data import TestAttackData
 from contentctl.objects.base_test_result import TestResultStatus


@@ -18,7 +18,7 @@ class TestGroup(BaseModel):
     name: str
     unit_test: UnitTest
     integration_test: IntegrationTest
-    attack_data: list[UnitTestAttackData]
+    attack_data: list[TestAttackData]

     @classmethod
     def derive_from_unit_test(cls, unit_test: UnitTest, name_prefix: str) -> "TestGroup":

diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py
index d8194dfc..66ed7921 100644
--- a/contentctl/objects/unit_test.py
+++ b/contentctl/objects/unit_test.py
@@ -1,10 +1,9 @@
 from __future__ import annotations
-from typing import Union

 from pydantic import Field

 from contentctl.objects.unit_test_baseline import UnitTestBaseline
-from contentctl.objects.unit_test_attack_data import UnitTestAttackData
+from contentctl.objects.test_attack_data import TestAttackData
 from contentctl.objects.unit_test_result import UnitTestResult
 from contentctl.objects.base_test import BaseTest, TestType
 from contentctl.objects.base_test_result import TestResultStatus
@@ -17,19 +16,21 @@ class UnitTest(BaseTest):
     # contentType: SecurityContentType = SecurityContentType.unit_tests

     # The test type (unit)
-    test_type: TestType = Field(TestType.UNIT)
+    test_type: TestType = Field(default=TestType.UNIT)

+    # TODO (cmcginley): looks like pass_condition has gone the way of the dodo; can I remove?
     # The condition to check if the search was successful
-    pass_condition: Union[str, None] = None
+    pass_condition: str | None = None

+    # TODO (cmcginley): looks like baselines has gone the way of the dodo; can I remove?
     # Baselines to be run before a unit test
     baselines: list[UnitTestBaseline] = []

     # The attack data to be ingested for the unit test
-    attack_data: list[UnitTestAttackData]
+    attack_data: list[TestAttackData]

     # The result of the unit test
-    result: Union[None, UnitTestResult] = None
+    result: UnitTestResult | None = None

     def skip(self, message: str) -> None:
         """

From fbd7587040c5057302317a1b60ba83aac448604f Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Wed, 21 Aug 2024 17:02:17 -0700
Subject: [PATCH 03/18] moving ManualTest conversion to start of func

---
 .../detection_abstract.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 3620b0e0..c1eca2c5 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -710,15 +710,6 @@ def tests_validate(
         v: list[UnitTest | IntegrationTest | ManualTest],
         info: ValidationInfo
     ) -> list[UnitTest | IntegrationTest | ManualTest]:
-        # Only production analytics require tests
-        if info.data.get("status", "") != DetectionStatus.production.value:
-            return v
-
-        # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
-        # Accordingly, we do not need to do additional checks if the type is Correlation
-        if info.data.get("type", "") in set([AnalyticsType.Correlation.value]):
-            return v
-
         # Pull the tags and a flag indicating if this is a manual_test detection or not
         tags: DetectionTags | None = info.data.get("tags", None)
@@ -741,6 +732,15 @@ def tests_validate(
                 tmp.append(manual_test)
             v = tmp

+        # Only production analytics require tests
+        if info.data.get("status", "") != DetectionStatus.production.value:
+            return v
+
+        # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
+        # Accordingly, we do not need to do additional checks if the type is Correlation
+        if info.data.get("type", "") in set([AnalyticsType.Correlation.value]):
+            return v
+
         # Ensure that there is at least 1 test
         if len(v) == 0:
             if tags is not None and tags.manual_test is not None:

From 46d0ccc7e736f3c29c6d1eb6377aee00e4385d58 Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Wed, 21 Aug 2024 17:35:06 -0700
Subject: [PATCH 04/18] stopping filtering of detections from list

---
 contentctl/actions/test.py                    | 38 ++++++++++---------
 .../detection_abstract.py                     | 26 ++++++++++++-
 2 files changed, 45 insertions(+), 19 deletions(-)

diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py
index a4898ea4..33074f10 100644
--- a/contentctl/actions/test.py
+++ b/contentctl/actions/test.py
@@ -46,29 +46,31 @@ class TestInputDto:

 class Test:

     def filter_detections(self, input_dto: TestInputDto)->TestInputDto:
-
         if not input_dto.config.enable_integration_testing:
-            #Skip all integraiton tests if integration testing is not enabled:
+            # Skip all integration tests if integration testing is not enabled:
             for detection in input_dto.detections:
                 for test in detection.tests:
                     if isinstance(test, IntegrationTest):
                         test.skip("TEST SKIPPED: Skipping all integration tests")
-
-        list_after_filtering:List[Detection] = []
-        #extra filtering which may be removed/modified in the future
-        for detection in input_dto.detections:
-            if (detection.status != DetectionStatus.production.value):
-                #print(f"{detection.name} - Not testing because [STATUS: {detection.status}]")
-                pass
-            elif detection.type == AnalyticsType.Correlation:
-                #print(f"{detection.name} - Not testing because [ TYPE: {detection.type}]")
-                pass
-            else:
-                list_after_filtering.append(detection)
-
-        return TestInputDto(list_after_filtering, input_dto.config)
-
-
+
+        # TODO (cmcginley): would we rather skip these detections here instead of during model
+        # creation?
+        # list_after_filtering:List[Detection] = []
+        # #extra filtering which may be removed/modified in the future
+        # for detection in input_dto.detections:
+        #     if (detection.status != DetectionStatus.production.value):
+        #         #print(f"{detection.name} - Not testing because [STATUS: {detection.status}]")
+        #         pass
+        #     elif detection.type == AnalyticsType.Correlation:
+        #         #print(f"{detection.name} - Not testing because [ TYPE: {detection.type}]")
+        #         pass
+        #     else:
+        #         list_after_filtering.append(detection)
+
+        # return TestInputDto(list_after_filtering, input_dto.config)
+
+        return input_dto
+
     def execute(self, input_dto: TestInputDto) -> bool:

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index c1eca2c5..6f8c2126 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -39,6 +39,11 @@
 MISSING_SOURCES: set[str] = set()

+# Those AnalyticsTypes that we do not test via contentctl
+UNTESTED_ANALYTICS_TYPES: set[str] = {
+    AnalyticsType.Correlation.value
+}
+

 class Detection_Abstract(SecurityContentObject):
     model_config = ConfigDict(use_enum_values=True)
@@ -470,6 +475,21 @@ def model_post_init(self, __context: Any) -> None:

         self.skip_manual_tests()

+        # NOTE: we ignore the type error around self.status because we are using Pydantic's
+        # use_enum_values configuration
+        # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+
+        # Skip tests for non-production detections
+        if self.status != DetectionStatus.production.value:  # type: ignore
+            self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status}).")
+
+        # TODO (cmcginley): should we just mark all Correlation type detections as manual_test?
+        # Skip tests for detection types like Correlation which are not supported via contentctl
+        if self.type in UNTESTED_ANALYTICS_TYPES:
+            self.skip_all_tests(
+                f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl."
+            )
+
     @field_validator('lookups', mode="before")
     @classmethod
     def getDetectionLookups(cls, v: list[str], info: ValidationInfo) -> list[Lookup]:
@@ -696,7 +716,7 @@ def ensurePresenceOfRequiredTests(self):

         # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
         # Accordingly, we do not need to do additional checks if the type is Correlation
-        if self.type in set([AnalyticsType.Correlation.value]):
+        if self.type in UNTESTED_ANALYTICS_TYPES:
             return self

         if len(self.tests) == 0:
             raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")

@@ -765,6 +785,10 @@ def tests_validate(
         # No issues - at least one test provided for production type requiring testing
         return v

+    def skip_all_tests(self, message: str = "TEST SKIPPED") -> None:
+        for test in self.tests:
+            test.skip(message=message)
+
     def skip_manual_tests(self) -> None:

From 14ce0f26daa32e7a1f204fe6091ab9869ba4459c Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Thu, 22 Aug 2024 11:42:54 -0700
Subject: [PATCH 05/18] adding more metrics to reports, debugging

---
 .../DetectionTestingInfrastructure.py         |  1 +
 .../views/DetectionTestingView.py             | 87 ++++++++++++-------
 contentctl/actions/test.py                    | 31 +++++--
 contentctl/contentctl.py                      |  1 +
 .../detection_abstract.py                     | 38 ++++++--
 contentctl/objects/config.py                  |  8 +-
 6 files changed, 118 insertions(+), 48 deletions(-)

diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
index 8f92b751..007a83ce 100644
--- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
+++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
@@ -396,6 +396,7 @@ def execute(self):
             self.sync_obj.outputQueue.append(detection)
             self.sync_obj.currentTestingQueue[self.get_name()] = None

+    # TODO (cmcginley): 2024-08-22 02:08:23,726 - INFO - security-content-automation [MainThread] - Error stopping view: ('cannot represent an object', )
     def test_detection(self, detection: Detection) -> None:
         """
         Tests a single detection; iterates over the TestGroups for the detection (one TestGroup per

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py
index aac6fbda..fd8f5432 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingView.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py
@@ -1,5 +1,6 @@
 import abc
 import datetime
+from typing import Any

 from pydantic import BaseModel
@@ -10,6 +11,7 @@
 )
 from contentctl.helper.utils import Utils
 from contentctl.objects.enums import DetectionStatus
+from contentctl.objects.base_test_result import TestResultStatus


 class DetectionTestingView(BaseModel, abc.ABC):
@@ -82,10 +84,14 @@ def getSummaryObject(
         :returns: summary dict
         """
         # Init the list of tested detections, and some metrics aggregate counters
-        tested_detections = []
+        tested_detections: list[dict[str, Any]] = []
         total_pass = 0
         total_fail = 0
         total_skipped = 0
+        total_production = 0
+        total_experimental = 0
+        total_deprecated = 0
+        total_manual = 0

         # Iterate the detections tested (anything in the output queue was tested)
         for detection in self.sync_obj.outputQueue:
@@ -94,47 +100,60 @@ def getSummaryObject(
                 test_job_fields=test_job_fields,
                 test_result_fields=test_result_fields
             )
+            # TODO (cmcginley): I find the enum to string conversion we use to be very confusing
             # Aggregate detection pass/fail metrics
-            if summary["success"] is False:
+            if detection.test_status == TestResultStatus.FAIL:
                 total_fail += 1
-            else:
-                #Test is marked as a success, but we need to determine if there were skipped unit tests
-                #SKIPPED tests still show a success in this field, but we want to count them differently
-                pass_increment = 1
-                for test in summary.get("tests"):
-                    if test.get("test_type") == "unit" and test.get("status") == "skip":
-                        total_skipped += 1
-                        #Test should not count as a pass, so do not increment the count
-                        pass_increment = 0
-                        break
-                total_pass += pass_increment
-
+            elif detection.test_status == TestResultStatus.PASS:
+                total_pass += 1
+            elif detection.test_status == TestResultStatus.SKIP:
+                total_skipped += 1
+
+            # Aggregate production status metrics
+            if detection.status == DetectionStatus.production.value:  # type: ignore
+                total_production += 1
+            elif detection.status == DetectionStatus.experimental.value:  # type: ignore
+                total_experimental += 1
+            elif detection.status == DetectionStatus.deprecated.value:  # type: ignore
+                total_deprecated += 1
+
+            # Check if the detection is manual_test
+            if detection.tags.manual_test is not None:
+                total_manual += 1

             # Append to our list
             tested_detections.append(summary)

-        # Sort s.t. all failures appear first (then by name)
-        #Second short condition is a hack to get detections with unit skipped tests to appear above pass tests
-        tested_detections.sort(key=lambda x: (x["success"], 0 if x.get("tests",[{}])[0].get("status","status_missing")=="skip" else 1, x["name"]))
+        # Sort s.t. all failures appear first, then passed before skipped detections, then
+        # detections w/ tests before those w/o, then by name
+        tested_detections.sort(
+            key=lambda x: (
+                x["success"],
+                0 if x["status"] == TestResultStatus.PASS.value else 1,
+                0 if len(x["tests"]) > 0 else 1,
+                x["name"]
+            )
+        )

         # Aggregate summaries for the untested detections (anything still in the input queue was untested)
         total_untested = len(self.sync_obj.inputQueue)
-        untested_detections = []
+        untested_detections: list[dict[str, Any]] = []
         for detection in self.sync_obj.inputQueue:
             untested_detections.append(detection.get_summary())

         # Sort by detection name
         untested_detections.sort(key=lambda x: x["name"])

+        # TODO (cmcginley): I think skippedQueue is no longer used?
         # Get lists of detections (name only) that were skipped due to their status (experimental or deprecated)
         # experimental_detections = sorted([
         #     detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.experimental.value
         # ])
         # deprecated_detections = sorted([
         #     detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.deprecated.value
         # ])

-        # If any detection failed, the overall success is False
+        # If any detection failed, or if there are untested detections, the overall success is False
         if (total_fail + len(untested_detections)) == 0:
             overall_success = True
         else:
             overall_success = False

         # Compute total detections
         total_detections = total_fail + total_pass + total_untested + total_skipped

+        # Compute total detections actually tested (at least one test not skipped)
+        total_tested_detections = total_fail + total_pass

         # Compute the percentage of completion for testing, as well as the success rate
         percent_complete = Utils.getPercent(
             len(tested_detections), len(untested_detections), 1
         )
         success_rate = Utils.getPercent(
-            total_pass, total_detections-total_skipped, 1
+            total_pass, total_tested_detections, 1
         )

         # TODO (#230): expand testing metrics reported
         # Construct and return the larger results dict
         result_dict = {
-            # TODO (cmcginley): differentiate total detections vs total tested
             "summary": {
-                "mode": self.config.getModeName().lower(),
+                "mode": self.config.getModeName(),
                 "success": overall_success,
                 "total_detections": total_detections,
+                "total_tested_detections": total_tested_detections,
                 "total_pass": total_pass,
                 "total_fail": total_fail,
                 "total_skipped": total_skipped,
                 "total_untested": total_untested,
-                "total_experimental_or_deprecated": len(deprecated_detections+experimental_detections),
+                "total_production": total_production,
+                "total_experimental": total_experimental,
+                "total_deprecated": total_deprecated,
+                "total_manual": total_manual,
+                "total_other_skips": total_skipped - total_deprecated - total_experimental - total_manual,
                 "success_rate": success_rate,
             },
             "tested_detections": tested_detections,
             "untested_detections": untested_detections,
             "percent_complete": percent_complete,
-            "deprecated_detections": deprecated_detections,
-            "experimental_detections": experimental_detections
         }

         return result_dict

diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py
index 33074f10..ec69bd01 100644
--- a/contentctl/actions/test.py
+++ b/contentctl/actions/test.py
@@ -90,14 +90,15 @@ def execute(self, input_dto: TestInputDto) -> bool:
             input_dto=manager_input_dto, output_dto=output_dto
         )

+        mode = input_dto.config.getModeName()
         if len(input_dto.detections) == 0:
-            print(f"With Detection Testing Mode '{input_dto.config.getModeName()}', there were [0] detections found to test.\nAs such, we will quit immediately.")
+            print(f"With Detection Testing Mode '{mode}', there were [0] detections found to test.\nAs such, we will quit immediately.")
             # Directly call stop so that the summary.yml will be generated. Of course it will not have any test results, but we still want it to contain
             # a summary showing that no detections were tested.
             file.stop()
         else:
-            print(f"MODE: [{input_dto.config.getModeName()}] - Test [{len(input_dto.detections)}] detections")
-            if input_dto.config.mode in [DetectionTestingMode.changes, DetectionTestingMode.selected]:
+            print(f"MODE: [{mode}] - Test [{len(input_dto.detections)}] detections")
+            if mode in [DetectionTestingMode.changes.value, DetectionTestingMode.selected.value]:
                 files_string = '\n- '.join([str(pathlib.Path(detection.file_path).relative_to(input_dto.config.path)) for detection in input_dto.detections])
                 print(f"Detections:\n- {files_string}")
@@ -108,7 +109,7 @@ def execute(self, input_dto: TestInputDto) -> bool:
         summary_results = file.getSummaryObject()
         summary = summary_results.get("summary", {})

-        print("Test Summary")
+        print(f"Test Summary (mode: {summary.get('mode','Error')})")
         print(f"\tSuccess                      : {summary.get('success',False)}")
         print(
             f"\tSuccess Rate                 : {summary.get('success_rate','ERROR')}"
         )
         print(
             f"\tTotal Detections             : {summary.get('total_detections','ERROR')}"
         )
         print(
-            f"\tPassed Detections            : {summary.get('total_pass','ERROR')}"
+            f"\tTotal Tested Detections      : {summary.get('total_tested_detections','ERROR')}"
         )
         print(
-            f"\tFailed Detections            : {summary.get('total_fail','ERROR')}"
+            f"\t  Passed Detections          : {summary.get('total_pass','ERROR')}"
         )
+        print(
+            f"\t  Failed Detections          : {summary.get('total_fail','ERROR')}"
+        )
+        print(
+            f"\tSkipped Detections           : {summary.get('total_skipped','ERROR')}"
+        )
+        print(
+            f"\t  Experimental Detections    : {summary.get('total_experimental','ERROR')}"
+        )
+        print(
+            f"\t  Deprecated Detections      : {summary.get('total_deprecated','ERROR')}"
+        )
+        print(
+            f"\t  Manually Tested Detections : {summary.get('total_manual','ERROR')}"
+        )
+        print(
+            f"\t  Other Skipped Detections   : {summary.get('total_other_skips','ERROR')}"
+        )
         print(
             f"\tUntested Detections          : {summary.get('total_untested','ERROR')}"
         )

diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py
index 3f9d054d..2ec03507 100644
--- a/contentctl/contentctl.py
+++ b/contentctl/contentctl.py
@@ -114,6 +114,7 @@ def test_common_func(config:test_common):

     t = Test()

+    # TODO (cmcginley): what language do we want here?
     # Remove detections that we do not want to test because they are
     # not production, the correct type, or manual_test only
     filted_test_input_dto = t.filter_detections(test_input_dto)

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 6f8c2126..688a1bd9 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -2,6 +2,8 @@
 from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated, Self
 import re
 import pathlib
+from enum import Enum
+
 from pydantic import (
     field_validator,
     model_validator,
@@ -164,23 +167,38 @@ def test_status(self) -> TestResultStatus | None:
     @property
     def test_status(self) -> TestResultStatus | None:
         """
         Returns the collective status of the detection's tests. If any test status has yet to be set,
         None is returned. If any test failed or errored, FAIL is returned. If all tests were skipped,
         SKIP is returned. If at least one test passed and the rest passed or skipped, PASS is returned.
""" + # If the detection has no tests, we consider it to have been skipped (only non-production, + # non-manual, non-correlation detections are allowed to have no tests defined) + if len(self.tests) == 0: + return TestResultStatus.SKIP + passed = 0 skipped = 0 for test in self.tests: + # If the result/status of any test has not yet been set, return None if test.result is None or test.result.status is None: return None elif test.result.status == TestResultStatus.ERROR or test.result.status == TestResultStatus.FAIL: + # If any test failed or errored, return fail (we don't return the error state at + # the aggregate detection level) return TestResultStatus.FAIL elif test.result.status == TestResultStatus.SKIP: skipped += 1 elif test.result.status == TestResultStatus.PASS: passed += 1 - if passed > 0: + else: + raise ValueError( + f"Undefined test status for test ({test.name}) in detection ({self.name})" + ) + + # If at least one of the tests passed and the rest passed or skipped, report pass + if passed > 0 and (passed + skipped) == len(self.tests): return TestResultStatus.PASS elif skipped == len(self.tests): + # If all tests skipped, return skip return TestResultStatus.SKIP - raise ValueError(f"Undefined test status for detection: {self.name}") + raise ValueError(f"Undefined overall test status for detection: {self.name}") @computed_field @property @@ -481,13 +499,13 @@ def model_post_init(self, __context: Any) -> None: # Skip tests for non-production detections if self.status != DetectionStatus.production.value: # type: ignore - self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status}).") + self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})") # TODO (cmcginley): should we just mark all Correlation type detections as manual_test? # Skip tests for detecton types like Correlation which are not supported via contentctl if self.type in UNTESTED_ANALYTICS_TYPES: self.skip_all_tests( - f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl." + f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl" ) @field_validator('lookups', mode="before") @@ -819,9 +837,11 @@ def all_tests_successful(self) -> bool: :returns: bool where True indicates all tests succeeded (they existed, complete and were PASS/SKIP) """ - # If no tests are defined, we consider it a failure for the detection + # If no tests are defined, we consider it a success for the detection (this detection was + # skipped for testing). 
Note that the existence of at least one test is enforced by Pydantic + # validation already, with a few specific exceptions if len(self.tests) == 0: - return False + return True # Iterate over tests for test in self.tests: @@ -864,6 +884,12 @@ def get_summary( # Grab the top level detection fields for field in detection_fields: value = getattr(self, field) + + # Enums cannot be serialized directly, so we convert it to a string + if isinstance(getattr(self, field), Enum): + value = str(value) + + # Alias any fields as needed if field in detection_field_aliases: summary_dict[detection_field_aliases[field]] = value else: diff --git a/contentctl/objects/config.py b/contentctl/objects/config.py index edf4ea64..319affdc 100644 --- a/contentctl/objects/config.py +++ b/contentctl/objects/config.py @@ -16,7 +16,7 @@ from contentctl.helper.utils import Utils from urllib.parse import urlparse from abc import ABC, abstractmethod -from contentctl.objects.enums import PostTestBehavior +from contentctl.objects.enums import PostTestBehavior, DetectionTestingMode from contentctl.objects.detection import Detection import tqdm @@ -674,11 +674,11 @@ def checkPlanOnlyUse(self)->Self: def getModeName(self)->str: if isinstance(self.mode, All): - return "All" + return DetectionTestingMode.all.value elif isinstance(self.mode, Changes): - return "Changes" + return DetectionTestingMode.changes.value else: - return "Selected" + return DetectionTestingMode.selected.value From fc700a5fb6352a2c4b9eedcb57d80339a4ae54e5 Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Thu, 22 Aug 2024 16:39:42 -0700 Subject: [PATCH 06/18] fixed issue w/ duplicate integration tests --- .../DetectionTestingInfrastructure.py | 1 + .../views/DetectionTestingView.py | 21 ++- .../detection_abstract.py | 154 ++++++++---------- 3 files changed, 89 insertions(+), 87 deletions(-) diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py index 007a83ce..f1fbf177 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py @@ -878,6 +878,7 @@ def execute_integration_test( service=self.get_conn(), pbar_data=pbar_data, ) + assert detection == correlation_search.detection # Run the test test.result = correlation_search.test() diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py index fd8f5432..d444fbad 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingView.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py @@ -85,6 +85,7 @@ def getSummaryObject( """ # Init the list of tested detections, and some metrics aggregate counters tested_detections: list[dict[str, Any]] = [] + skipped_detections: list[dict[str, Any]] = [] total_pass = 0 total_fail = 0 total_skipped = 0 @@ -121,15 +122,23 @@ def getSummaryObject( if detection.tags.manual_test is not None: total_manual += 1 - # Append to our list - tested_detections.append(summary) + # Append to our list (skipped or tested) + if detection.test_status == TestResultStatus.SKIP: + skipped_detections.append(summary) + else: + tested_detections.append(summary) - # Sort s.t. 
all failures appear first, then passed before skipped detections, then - # detections w/ tests before those w/o, then by name + # Sort tested detections s.t. all failures appear first, then by name tested_detections.sort( key=lambda x: ( x["success"], - 0 if x["status"] == TestResultStatus.PASS.value else 1, + x["name"] + ) + ) + + # Sort skipped detections s.t. detections w/ tests appear before those w/o, then by name + skipped_detections.sort( + key=lambda x: ( 0 if len(x["tests"]) > 0 else 1, x["name"] ) @@ -179,6 +188,7 @@ def getSummaryObject( # TODO (cmcginley): differentiate total detections vs total tested "summary": { "mode": self.config.getModeName(), + "enable_integration_testing": self.config.enable_integration_testing, "success": overall_success, "total_detections": total_detections, "total_tested_detections": total_tested_detections, @@ -194,6 +204,7 @@ def getSummaryObject( "success_rate": success_rate, }, "tested_detections": tested_detections, + "skipped_detections": skipped_detections, "untested_detections": untested_detections, "percent_complete": percent_complete, } diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index 688a1bd9..680c28e2 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -132,17 +132,30 @@ def validate_presence_of_filter_macro( return value - @model_validator(mode="after") - def build_test_groups(self) -> Self: + def adjust_tests_and_groups(self) -> None: """ - Builds the `test_groups` field, constructing the model from the list of unit tests - if no explicit construct was provided - :param value: the value of the field `test_groups` - :param values: a dict of the other fields in the Detection model + Converts UnitTest to ManualTest as needed, B=builds the `test_groups` field, constructing + the model from the list of unit tests. Also, preemptively skips all manual tests, as well as + tests for experimental/deprecated detections and Correlation type detections. """ - # if the value was not the None default, do nothing - if self.test_groups is not None: - return self + # Since ManualTest and UnitTest are not differentiable without looking at the manual_test + # tag, Pydantic builds all tests as UnitTest objects. If we see the manual_test flag, we + # convert these to ManualTest + tmp: list[UnitTest | IntegrationTest | ManualTest] = [] + if self.tags.manual_test is not None: + for test in self.tests: + if not isinstance(test, UnitTest): + raise ValueError( + "At this point of intialization, tests should only be UnitTest objects, " + f"but encountered a {type(test)}." 
+                    )
+                # Create the manual test and skip it upon creation (cannot test via contentctl)
+                manual_test = ManualTest(
+                    name=test.name,
+                    attack_data=test.attack_data
+                )
+                tmp.append(manual_test)
+            self.tests = tmp

         # iterate over the tests and create a TestGroup (and as a result, an IntegrationTest) for
         # each unit test
         self.test_groups = []
         for test in self.tests:
             # We only derive TestGroups from UnitTests (ManualTest is ignored and IntegrationTests
             # have not been created yet)
             if isinstance(test, UnitTest):
                 test_group = TestGroup.derive_from_unit_test(test, self.name)
                 self.test_groups.append(test_group)

         # now add each integration test to the list of tests
         for test_group in self.test_groups:
             self.tests.append(test_group.integration_test)

-        return self
+        # Skip all manual tests
+        self.skip_manual_tests()
+
+        # NOTE: we ignore the type error around self.status because we are using Pydantic's
+        # use_enum_values configuration
+        # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+
+        # Skip tests for non-production detections
+        if self.status != DetectionStatus.production.value:  # type: ignore
+            self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})")
+
+        # TODO (cmcginley): should we just mark all Correlation type detections as manual_test?
+        # Skip tests for detection types like Correlation which are not supported via contentctl
+        if self.type in UNTESTED_ANALYTICS_TYPES:
+            self.skip_all_tests(
+                f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl"
+            )

     @property
     def test_status(self) -> TestResultStatus | None:
         """
         Returns the collective status of the detection's tests. If any test status has yet to be set,
@@ -491,22 +520,8 @@ def model_post_init(self, __context: Any) -> None:

         self.cve_enrichment_func(__context)

-        self.skip_manual_tests()
-
-        # NOTE: we ignore the type error around self.status because we are using Pydantic's
-        # use_enum_values configuration
-        # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
-
-        # Skip tests for non-production detections
-        if self.status != DetectionStatus.production.value:  # type: ignore
-            self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})")
-
-        # TODO (cmcginley): should we just mark all Correlation type detections as manual_test?
-        # Skip tests for detection types like Correlation which are not supported via contentctl
-        if self.type in UNTESTED_ANALYTICS_TYPES:
-            self.skip_all_tests(
-                f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl"
-            )
+        # Derive TestGroups and IntegrationTests, adjust for ManualTests, skip as needed
+        self.adjust_tests_and_groups()

     @field_validator('lookups', mode="before")
     @classmethod
     def getDetectionLookups(cls, v: list[str], info: ValidationInfo) -> list[Lookup]:
@@ -722,25 +737,27 @@ def search_observables_exist_validate(self):
         # Found everything
         return self

-    @model_validator(mode='after')
-    def ensurePresenceOfRequiredTests(self):
-        # NOTE: we ignore the type error around self.status because we are using Pydantic's
-        # use_enum_values configuration
-        # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
-
-        # Only production analytics require tests
-        if self.status != DetectionStatus.production.value:  # type: ignore
-            return self
-
-        # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
-        # Accordingly, we do not need to do additional checks if the type is Correlation
-        if self.type in UNTESTED_ANALYTICS_TYPES:
-            return self
-
-        if len(self.tests) == 0:
-            raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")
-
-        return self
+    # TODO (cmcginley): the check here is identical to what is being performed in the
+    # tests_validate func, can we remove?
+    # @model_validator(mode='after')
+    # def ensurePresenceOfRequiredTests(self):
+    #     # NOTE: we ignore the type error around self.status because we are using Pydantic's
+    #     # use_enum_values configuration
+    #     # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name

+    #     # Only production analytics require tests
+    #     if self.status != DetectionStatus.production.value:  # type: ignore
+    #         return self

+    #     # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
+    #     # Accordingly, we do not need to do additional checks if the type is Correlation
+    #     if self.type in UNTESTED_ANALYTICS_TYPES:
+    #         return self

+    #     if len(self.tests) == 0:
+    #         raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")

+    #     return self

     @field_validator("tests")
     def tests_validate(
         cls,
         v: list[UnitTest | IntegrationTest | ManualTest],
         info: ValidationInfo
     ) -> list[UnitTest | IntegrationTest | ManualTest]:
         # Only production analytics require tests
         if info.data.get("status", "") != DetectionStatus.production.value:
             return v

         # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
         # Accordingly, we do not need to do additional checks if the type is Correlation
-        if info.data.get("type", "") in set([AnalyticsType.Correlation.value]):
+        if info.data.get("type", "") in UNTESTED_ANALYTICS_TYPES:
+            return v
+
+        # Manually tested detections are not required to have tests defined
+        tags: DetectionTags | None = info.data.get("tags", None)
+        if tags is not None and tags.manual_test is not None:
             return v

         # Ensure that there is at least 1 test
         if len(v) == 0:
-            if tags is not None and tags.manual_test is not None:
-                # Detections that are manual_test MAY have tests, but it is not required. If they
-                # do not have one, then create one which will be a placeholder.
-                # Note that this fake ManualTest will NOT be generated if there ARE test(s) defined
-                # for a Detection.
-
-                # TODO (cmcginley): make this ManualTest; ultimately this test case should be
-                # explicit and not implicit
-                placeholder_test = ManualTest(  # type: ignore
-                    name="PLACEHOLDER FOR DETECTION TAGGED MANUAL_TEST WITH NO TESTS SPECIFIED IN YML FILE",
-                    attack_data=[]
-                )
-                return [placeholder_test]
-
-            else:
-                raise ValueError(
-                    "At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND")
-                )
+            raise ValueError(
+                "At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND")
+            )

         # No issues - at least one test provided for production type requiring testing
         return v
@@ -849,7 +853,9 @@ def get_summary(
         detection_field_aliases: dict[str, str] = {
             "status": "production_status", "test_status": "status", "source": "source_category"
         },
+        tags_fields: list[str] = ["manual_test"],
         test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
         test_job_fields: list[str] = ["resultCount", "runDuration"],
     ) -> dict[str, Any]:
@@ -893,6 +899,10 @@ def get_summary(
             else:
                 summary_dict[field] = value

+        # Grab fields from the tags
+        for field in tags_fields:
+            summary_dict[field] = getattr(self.tags, field)
+
         # Set success based on whether all tests passed
         summary_dict["success"] = self.all_tests_successful()

From 6eaa95f797e38750d01c02c5b1cb297e05557f14 Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Thu, 22 Aug 2024 18:21:39 -0700
Subject: [PATCH 07/18] adding filepaths for junit

---
 .../actions/detection_testing/views/DetectionTestingView.py | 4 +++-
 .../abstract_security_content_objects/detection_abstract.py | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py
index d444fbad..f7266efb 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingView.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py
@@ -182,7 +182,9 @@ def getSummaryObject(
             total_pass, total_tested_detections, 1
         )

-        # TODO (#230): expand testing metrics reported
+        # TODO (cmcginley): the dual language of skipped_detections and untested_detections is a
+        #   bit confusing; consider renaming
+        # TODO (#230): expand testing metrics reported (and make nested)
         # Construct and return the larger results dict
         result_dict = {

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 680c28e2..7deebe90 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -849,7 +849,9 @@ def get_summary(

     def get_summary(
         self,
-        detection_fields: list[str] = ["name", "type", "status", "test_status", "source", "data_source", "search"],
+        detection_fields: list[str] = [
+            "name", "type", "status", "test_status", "source", "data_source", "search", "file_path"
+        ],
         detection_field_aliases: dict[str, str] = {
             "status": "production_status", "test_status": "status", "source": "source_category"
         },

From 63817e507122ed59cdd3433f4139ed411e5be86e Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Thu, 22 Aug 2024 18:36:21 -0700
Subject: [PATCH 08/18] bugfix on file path typing

---
 .../abstract_security_content_objects/detection_abstract.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
index 7deebe90..c775704b 100644
--- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py
+++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py
@@ -873,8 +873,8 @@ def get_summary(
         for field in detection_fields:
             value = getattr(self, field)

-            # Enums cannot be serialized directly, so we convert it to a string
-            if isinstance(getattr(self, field), Enum):
+            # Enums and Path objects cannot be serialized directly, so we convert it to a string
+            if isinstance(value, Enum) or isinstance(value, pathlib.Path):
                 value = str(value)

             # Alias any fields as needed

From 237427d65c01a72e4d7f10347e589debb38f9143 Mon Sep 17 00:00:00 2001
From: Casey McGinley
Date: Thu, 22 Aug 2024 19:15:14 -0700
Subject: [PATCH 09/18] comments and cleanup

---
 .../DetectionTestingInfrastructure.py         |  3 --
 .../views/DetectionTestingView.py             | 17 +-------
 .../views/DetectionTestingViewFile.py         |  2 +-
 contentctl/actions/test.py                    | 39 +++++++------------
 contentctl/contentctl.py                      |  7 ++--
 .../detection_abstract.py                     | 37 +++++-------------
 contentctl/objects/integration_test_result.py |  3 --
 contentctl/objects/test_attack_data.py        |  7 ++--
 contentctl/objects/test_group.py              |  2 +-
 contentctl/objects/unit_test.py               |  2 -
 10 files changed, 33 insertions(+), 86 deletions(-)

diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
index f1fbf177..34b4f330 100644
--- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
+++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
@@ -10,7 +10,6 @@
 from tempfile import TemporaryDirectory, mktemp
 from ssl import SSLEOFError, SSLZeroReturnError
 from sys import stdout
-#from dataclasses import dataclass
 from shutil import copyfile
 from typing import Union, Optional
@@ -396,7 +395,6 @@ def execute(self):
             self.sync_obj.outputQueue.append(detection)
             self.sync_obj.currentTestingQueue[self.get_name()] = None

-    # TODO (cmcginley): 2024-08-22 02:08:23,726 - INFO - security-content-automation [MainThread] - Error stopping view: ('cannot represent an object', )
     def test_detection(self, detection: Detection) -> None:
         """
         Tests a single detection; iterates over the TestGroups for the detection (one TestGroup per
@@ -878,7 +876,6 @@ def execute_integration_test(
                 service=self.get_conn(),
                 pbar_data=pbar_data,
             )
-            assert detection == correlation_search.detection

             # Run the test
             test.result = correlation_search.test()

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py
index f7266efb..4d782127 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingView.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py
@@ -76,14 +76,14 @@ def getSummaryObject(
         self,
         test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
         test_job_fields: list[str] = ["resultCount", "runDuration"],
-    ) -> dict:
+    ) -> dict[str, dict[str, Any] | list[dict[str, Any]] | str]:
         """
         Iterates over detections, consolidating results into a single dict and aggregating metrics
        :param test_result_fields: fields to pull from the test result
        :param test_job_fields: fields to pull from the job content of the test result
        :returns: summary dict
         """
-        # Init the list of tested detections, and some metrics aggregate counters
+        # Init the list of tested and skipped detections, and some metrics aggregate counters
         tested_detections: list[dict[str, Any]] = []
         skipped_detections: list[dict[str, Any]] = []
         total_pass = 0
@@ -101,7 +101,6 @@ def getSummaryObject(
                 test_job_fields=test_job_fields,
                 test_result_fields=test_result_fields
             )
-            # TODO (cmcginley): I find the enum to string conversion we use to be very confusing
             # Aggregate detection pass/fail metrics
             if detection.test_status == TestResultStatus.FAIL:
                 total_fail += 1
@@ -153,15 +152,6 @@ def getSummaryObject(
         # Sort by detection name
         untested_detections.sort(key=lambda x: x["name"])

-        # TODO (cmcginley): I think skippedQueue is no longer used?
-        # Get lists of detections (name only) that were skipped due to their status (experimental or deprecated)
-        # experimental_detections = sorted([
-        #     detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.experimental.value
-        # ])
-        # deprecated_detections = sorted([
-        #     detection.name for detection in self.sync_obj.skippedQueue if detection.status == DetectionStatus.deprecated.value
-        # ])
-
         # If any detection failed, or if there are untested detections, the overall success is False
         if (total_fail + len(untested_detections)) == 0:
@@ -182,12 +172,9 @@ def getSummaryObject(
-        # TODO (cmcginley): the dual language of skipped_detections and untested_detections is a
-        #   bit confusing; consider renaming
         # TODO (#230): expand testing metrics reported (and make nested)
         # Construct and return the larger results dict
         result_dict = {

diff --git a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
index d8c15e02..0e18b1c7 100644
--- a/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
+++ b/contentctl/actions/detection_testing/views/DetectionTestingViewFile.py
@@ -31,7 +31,7 @@ def stop(self):
         # use the yaml writer class
         with open(output_file, "w") as res:
-            res.write(yaml.safe_dump(result_dict,sort_keys=False))
+            res.write(yaml.safe_dump(result_dict, sort_keys=False))

     def showStatus(self, interval: int = 60):
         pass

diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py
index ec69bd01..0a377894 100644
--- a/contentctl/actions/test.py
+++ b/contentctl/actions/test.py
@@ -45,7 +45,7 @@ class TestInputDto:

 class Test:

-    def filter_detections(self, input_dto: TestInputDto)->TestInputDto:
+    def filter_tests(self, input_dto: TestInputDto) -> TestInputDto:
         if not input_dto.config.enable_integration_testing:
             # Skip all integration tests if integration testing is not enabled:
             for detection in input_dto.detections:
                 for test in detection.tests:
                     if isinstance(test, IntegrationTest):
                         test.skip("TEST SKIPPED: Skipping all integration tests")

-        # TODO (cmcginley): would we rather skip these detections here instead of during model
-        # creation?
-        # list_after_filtering:List[Detection] = []
-        # #extra filtering which may be removed/modified in the future
-        # for detection in input_dto.detections:
-        #     if (detection.status != DetectionStatus.production.value):
-        #         #print(f"{detection.name} - Not testing because [STATUS: {detection.status}]")
-        #         pass
-        #     elif detection.type == AnalyticsType.Correlation:
-        #         #print(f"{detection.name} - Not testing because [ TYPE: {detection.type}]")
-        #         pass
-        #     else:
-        #         list_after_filtering.append(detection)
-
-        # return TestInputDto(list_after_filtering, input_dto.config)
-
         return input_dto

     def execute(self, input_dto: TestInputDto) -> bool:
-
-
-
         output_dto = DetectionTestingManagerOutputDto()

         web = DetectionTestingViewWeb(config=input_dto.config, sync_obj=output_dto)
@@ -89,22 +70,28 @@ def execute(self, input_dto: TestInputDto) -> bool:
         manager = DetectionTestingManager(
             input_dto=manager_input_dto, output_dto=output_dto
         )
-
+
         mode = input_dto.config.getModeName()
         if len(input_dto.detections) == 0:
-            print(f"With Detection Testing Mode '{mode}', there were [0] detections found to test.\nAs such, we will quit immediately.")
-            # Directly call stop so that the summary.yml will be generated. Of course it will not have any test results, but we still want it to contain
-            # a summary showing that now detections were tested.
+            print(
+                f"With Detection Testing Mode '{mode}', there were [0] detections found to test."
+                "\nAs such, we will quit immediately."
+            )
+            # Directly call stop so that the summary.yml will be generated. Of course it will not
+            # have any test results, but we still want it to contain a summary showing that no
+            # detections were tested.
             file.stop()
         else:
             print(f"MODE: [{mode}] - Test [{len(input_dto.detections)}] detections")
             if mode in [DetectionTestingMode.changes.value, DetectionTestingMode.selected.value]:
-                files_string = '\n- '.join([str(pathlib.Path(detection.file_path).relative_to(input_dto.config.path)) for detection in input_dto.detections])
+                files_string = '\n- '.join(
+                    [str(pathlib.Path(detection.file_path).relative_to(input_dto.config.path)) for detection in input_dto.detections]
+                )
                 print(f"Detections:\n- {files_string}")

             manager.setup()
             manager.execute()
-
+
         try:
             summary_results = file.getSummaryObject()
             summary = summary_results.get("summary", {})
diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py
index 2ec03507..c21aa407 100644
--- a/contentctl/contentctl.py
+++ b/contentctl/contentctl.py
@@ -114,10 +114,9 @@ def test_common_func(config:test_common):

     t = Test()

-    # TODO (cmcginley): what language do we want here?
-    # Remove detections that we do not want to test because they are
-    # not production, the correct type, or manual_test only
-    filted_test_input_dto = t.filter_detections(test_input_dto)
+    # Remove detections or disable tests that we do not want to test (e.g. integration testing is
+    # disabled)
+    filted_test_input_dto = t.filter_tests(test_input_dto)

     if config.plan_only:
         #Emit the test plan and quit. 
Do not actually run the test diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index c775704b..3381ac17 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -1,5 +1,5 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated, Self +from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated import re import pathlib from enum import Enum @@ -182,7 +182,6 @@ def adjust_tests_and_groups(self) -> None: if self.status != DetectionStatus.production.value: # type: ignore self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})") - # TODO (cmcginley): should we just mark all Correlation type detections as manual_test? # Skip tests for detecton types like Correlation which are not supported via contentctl if self.type in UNTESTED_ANALYTICS_TYPES: self.skip_all_tests( @@ -194,7 +193,8 @@ def test_status(self) -> TestResultStatus | None: """ Returns the collective status of the detections tests. If any test status has yet to be set, None is returned.If any test failed or errored, FAIL is returned. If all tests were skipped, - SKIP is returned. If at least one test passed and the rest passed or skipped, PASS is returned. + SKIP is returned. If at least one test passed and the rest passed or skipped, PASS is + returned. """ # If the detection has no tests, we consider it to have been skipped (only non-production, # non-manual, non-correlation detections are allowed to have no tests defined) @@ -737,28 +737,6 @@ def search_observables_exist_validate(self): # Found everything return self - # TODO (cmcginley): the check here is identical to what is being performed in the - # tests_validate func, can we remove? - # @model_validator(mode='after') - # def ensurePresenceOfRequiredTests(self): - # # NOTE: we ignore the type error around self.status because we are using Pydantic's - # # use_enum_values configuration - # # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name - - # # Only production analytics require tests - # if self.status != DetectionStatus.production.value: # type: ignore - # return self - - # # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them. - # # Accordingly, we do not need to do additional checks if the type is Correlation - # if self.type in UNTESTED_ANALYTICS_TYPES: - # return self - - # if len(self.tests) == 0: - # raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}") - - # return self - @field_validator("tests") def tests_validate( cls, @@ -769,8 +747,9 @@ def tests_validate( if info.data.get("status", "") != DetectionStatus.production.value: return v - # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them. - # Accordingly, we do not need to do additional checks if the type is Correlation + # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined + # types, requires them. 
Accordingly, we do not need to do additional checks if the type is + # Correlation if info.data.get("type", "") in UNTESTED_ANALYTICS_TYPES: return v @@ -789,6 +768,10 @@ def tests_validate( return v def skip_all_tests(self, message: str = "TEST SKIPPED") -> None: + """ + Given a message, skip all tests for this detection. + :param message: the message to set in the test result + """ for test in self.tests: test.skip(message=message) diff --git a/contentctl/objects/integration_test_result.py b/contentctl/objects/integration_test_result.py index 66553def..18390120 100644 --- a/contentctl/objects/integration_test_result.py +++ b/contentctl/objects/integration_test_result.py @@ -1,8 +1,5 @@ from contentctl.objects.base_test_result import BaseTestResult -# TODO (cmcginley): this seems unused, can I delete? -SAVED_SEARCH_TEMPLATE = "{server}:{web_port}/en-US/{path}" - class IntegrationTestResult(BaseTestResult): """ diff --git a/contentctl/objects/test_attack_data.py b/contentctl/objects/test_attack_data.py index 8421199b..2c53df0b 100644 --- a/contentctl/objects/test_attack_data.py +++ b/contentctl/objects/test_attack_data.py @@ -1,13 +1,12 @@ from __future__ import annotations from pydantic import BaseModel, HttpUrl, FilePath, Field -from typing import Union, Optional class TestAttackData(BaseModel): - data: Union[HttpUrl, FilePath] = Field(...) + data: HttpUrl | FilePath = Field(...) # TODO - should source and sourcetype should be mapped to a list # of supported source and sourcetypes in a given environment? source: str = Field(...) sourcetype: str = Field(...) - custom_index: Optional[str] = None - host: Optional[str] = None + custom_index: str | None = None + host: str | None = None diff --git a/contentctl/objects/test_group.py b/contentctl/objects/test_group.py index 6156f68d..6fb76c7d 100644 --- a/contentctl/objects/test_group.py +++ b/contentctl/objects/test_group.py @@ -9,7 +9,7 @@ class TestGroup(BaseModel): """ Groups of different types of tests relying on the same attack data - :param name: Name of the TestGroup (typically derived from a unit test as + :param name: Name of the TestGroup (typically derived from a unit test as "{detection.name}:{test.name}") :param unit_test: a UnitTest :param integration_test: an IntegrationTest diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py index 66ed7921..a18160ff 100644 --- a/contentctl/objects/unit_test.py +++ b/contentctl/objects/unit_test.py @@ -18,11 +18,9 @@ class UnitTest(BaseTest): # The test type (unit) test_type: TestType = Field(default=TestType.UNIT) - # TODO (cmcginley): looks like pass_condition has gone the way of the dodo; can I remove? # The condition to check if the search was successful pass_condition: str | None = None - # TODO (cmcginley): looks like baselines has gone the way of the dodo; can I remove? 
# Baselines to be run before a unit test baselines: list[UnitTestBaseline] = [] From f66c60f6fe6756e1418f0a0890c2689ae871a152 Mon Sep 17 00:00:00 2001 From: pyth0n1c Date: Fri, 23 Aug 2024 17:43:46 -0700 Subject: [PATCH 10/18] Responses to Casey's recommendations --- .../DetectionTestingInfrastructure.py | 20 +++++++++---------- contentctl/actions/test.py | 10 ---------- contentctl/contentctl.py | 8 ++------ contentctl/objects/unit_test.py | 3 --- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py index 994d5ea4..9224dc45 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py @@ -60,13 +60,14 @@ class CleanupTestGroupResults(BaseModel): class ContainerStoppedException(Exception): pass +class CannotRunBaselineException(Exception): + pass @dataclasses.dataclass(frozen=False) class DetectionTestingManagerOutputDto(): inputQueue: list[Detection] = Field(default_factory=list) outputQueue: list[Detection] = Field(default_factory=list) - skippedQueue: list[Detection] = Field(default_factory=list) currentTestingQueue: dict[str, Union[Detection, None]] = Field(default_factory=dict) start_time: Union[datetime.datetime, None] = None replay_index: str = "CONTENTCTL_TESTING_INDEX" @@ -1014,18 +1015,15 @@ def retry_search_until_timeout( """ # Get the start time and compute the timeout search_start_time = time.time() - search_stop_time = time.time() + self.sync_obj.timeout_seconds - - # We will default to ensuring at least one result exists - if test.pass_condition is None: - search = detection.search - else: - # Else, use the explicit pass condition - search = f"{detection.search} {test.pass_condition}" + search_stop_time = time.time() + self.sync_obj.timeout_seconds + + # Make a copy of the search string since we may + # need to make some small changes to it below + search = detection.search # Ensure searches that do not begin with '|' must begin with 'search ' - if not search.strip().startswith("|"): # type: ignore - if not search.strip().startswith("search "): # type: ignore + if not search.strip().startswith("|"): + if not search.strip().startswith("search "): search = f"search {search}" # exponential backoff for wait time diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py index 0a377894..de377d84 100644 --- a/contentctl/actions/test.py +++ b/contentctl/actions/test.py @@ -45,16 +45,6 @@ class TestInputDto: class Test: - def filter_tests(self, input_dto: TestInputDto) -> TestInputDto: - if not input_dto.config.enable_integration_testing: - # Skip all integraiton tests if integration testing is not enabled: - for detection in input_dto.detections: - for test in detection.tests: - if isinstance(test, IntegrationTest): - test.skip("TEST SKIPPED: Skipping all integration tests") - - return input_dto - def execute(self, input_dto: TestInputDto) -> bool: output_dto = DetectionTestingManagerOutputDto() diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py index c21aa407..388c5414 100644 --- a/contentctl/contentctl.py +++ b/contentctl/contentctl.py @@ -114,16 +114,12 @@ def test_common_func(config:test_common): t = Test() - # Remove detections or disable tests that we do not want to test (e.g. 
integration testing is - # disabled) - filted_test_input_dto = t.filter_tests(test_input_dto) - if config.plan_only: #Emit the test plan and quit. Do not actually run the test - config.dumpCICDPlanAndQuit(gitServer.getHash(),filted_test_input_dto.detections) + config.dumpCICDPlanAndQuit(gitServer.getHash(),test_input_dto.detections) return - success = t.execute(filted_test_input_dto) + success = t.execute(test_input_dto) if success: #Everything passed! diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py index a18160ff..f9327e13 100644 --- a/contentctl/objects/unit_test.py +++ b/contentctl/objects/unit_test.py @@ -18,9 +18,6 @@ class UnitTest(BaseTest): # The test type (unit) test_type: TestType = Field(default=TestType.UNIT) - # The condition to check if the search was successful - pass_condition: str | None = None - # Baselines to be run before a unit test baselines: list[UnitTestBaseline] = [] From dcbacfc9222fcb57e494c1a110a44aaeb87f2e04 Mon Sep 17 00:00:00 2001 From: pyth0n1c Date: Mon, 26 Aug 2024 14:52:28 -0700 Subject: [PATCH 11/18] Added back the integration test filtering logic to the original location. Added logic to throw an error and description for why baselines throw an error right now. --- .../DetectionTestingInfrastructure.py | 24 +++++++++++++++---- contentctl/actions/test.py | 19 ++++++++++++++- contentctl/contentctl.py | 1 + 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py index 9224dc45..ac6001a8 100644 --- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py @@ -61,6 +61,11 @@ class CleanupTestGroupResults(BaseModel): class ContainerStoppedException(Exception): pass class CannotRunBaselineException(Exception): + # Support for testing detections with baselines + # does not currently exist in contentctl. + # As such, whenever we encounter a detection + # with baselines we should generate a descriptive + # exception pass @@ -647,11 +652,7 @@ def execute_unit_test( # Set the mode and timeframe, if required kwargs = {"exec_mode": "blocking"} - # Iterate over baselines (if any) - for baseline in test.baselines: - # TODO: this is executing the test, not the baseline... - # TODO: should this be in a try/except if the later call is? 
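
The two TODOs being removed above ask the right question, and patches 11 and 12 answer it: instead of silently running the detection search once per baseline, `execute_unit_test` raises a descriptive exception inside the existing try/except and records an ERROR result. A minimal, self-contained sketch of the guard these two patches converge on (the `guard_baselines` helper name is invented for illustration; in the real patches the check lives inline in `execute_unit_test`):

```python
class CannotRunBaselineException(Exception):
    """Raised when a detection depends on a Baseline, which contentctl cannot yet run."""


def guard_baselines(detection) -> None:
    # Fail fast with actionable guidance rather than mis-testing the detection;
    # the caller catches this exception and records an ERROR result on the test.
    if len(detection.baselines) > 0:
        raise CannotRunBaselineException(
            "Detection requires execution of a Baseline, however Baseline execution "
            "is not currently supported in contentctl. Mark this as manual_test."
        )
```
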
-            self.retry_search_until_timeout(detection, test, kwargs, test_start_time)
+

         # Set earliest_time and latest_time appropriately if FORCE_ALL_TIME is False
         if not FORCE_ALL_TIME:
@@ -662,7 +663,20 @@ def execute_unit_test(

         # Run the detection's search query
         try:
+            # Iterate over baselines (if any)
+            for baseline in test.baselines:
+                raise CannotRunBaselineException("Baseline execution is not currently supported in contentctl")
             self.retry_search_until_timeout(detection, test, kwargs, test_start_time)
+        except CannotRunBaselineException as e:
+            # Init the test result and record a failure if there was an issue during the search
+            test.result = UnitTestResult()
+            test.result.set_job_content(
+                None,
+                self.infrastructure,
+                TestResultStatus.ERROR,
+                exception=e,
+                duration=time.time() - test_start_time
+            )
         except ContainerStoppedException as e:
             raise e
         except Exception as e:
diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py
index de377d84..d4ee014d 100644
--- a/contentctl/actions/test.py
+++ b/contentctl/actions/test.py
@@ -44,7 +44,24 @@ class TestInputDto:


 class Test:
-
+    def filter_tests(self, input_dto: TestInputDto) -> None:
+        """
+        If integration testing has NOT been enabled, then skip
+        all of the integration tests. Otherwise, do nothing.
+
+        Args:
+            input_dto (TestInputDto): A configuration of the test and all of the
+                tests to be run.
+        """
+
+        if not input_dto.config.enable_integration_testing:
+            # Skip all integration tests if integration testing is not enabled:
+            for detection in input_dto.detections:
+                for test in detection.tests:
+                    if isinstance(test, IntegrationTest):
+                        test.skip("TEST SKIPPED: Skipping all integration tests")
+
+
     def execute(self, input_dto: TestInputDto) -> bool:

         output_dto = DetectionTestingManagerOutputDto()
diff --git a/contentctl/contentctl.py b/contentctl/contentctl.py
index 388c5414..e6d8c5d7 100644
--- a/contentctl/contentctl.py
+++ b/contentctl/contentctl.py
@@ -113,6 +113,7 @@ def test_common_func(config:test_common):
     test_input_dto = TestInputDto(detections_to_test, config)

     t = Test()
+    t.filter_tests(test_input_dto)

     if config.plan_only:
         #Emit the test plan and quit. Do not actually run the test
From 779c7cbddd1bfb86133d1748f954289f7c824a11 Mon Sep 17 00:00:00 2001
From: pyth0n1c
Date: Mon, 26 Aug 2024 15:42:43 -0700
Subject: [PATCH 12/18] Throw runtime exceptions when trying to test a production search with Baseline(s) that is not marked as manual_test
---
 .../infrastructures/DetectionTestingInfrastructure.py | 7 +++++--
 contentctl/objects/unit_test.py                       | 3 ---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
index ac6001a8..5ab9a257 100644
--- a/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
+++ b/contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
@@ -664,8 +664,11 @@ def execute_unit_test(
         # Run the detection's search query
         try:
             # Iterate over baselines (if any)
-            for baseline in test.baselines:
-                raise CannotRunBaselineException("Baseline execution is not currently supported in contentctl")
+            for baseline in detection.baselines:
+                raise CannotRunBaselineException("Detection requires Execution of a Baseline, "
+                                                 "however Baseline execution is not "
+                                                 "currently supported in contentctl. 
Mark " + "this as manual_test.") self.retry_search_until_timeout(detection, test, kwargs, test_start_time) except CannotRunBaselineException as e: # Init the test result and record a failure if there was an issue during the search diff --git a/contentctl/objects/unit_test.py b/contentctl/objects/unit_test.py index f9327e13..67dc1d62 100644 --- a/contentctl/objects/unit_test.py +++ b/contentctl/objects/unit_test.py @@ -18,9 +18,6 @@ class UnitTest(BaseTest): # The test type (unit) test_type: TestType = Field(default=TestType.UNIT) - # Baselines to be run before a unit test - baselines: list[UnitTestBaseline] = [] - # The attack data to be ingested for the unit test attack_data: list[TestAttackData] From 71b2559d64ee335c32d092f9c72d05f8276eb890 Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Tue, 27 Aug 2024 09:37:21 -0700 Subject: [PATCH 13/18] decorated with issue #257 --- .../detection_abstract.py | 1 + .../security_content_object_abstract.py | 1 + contentctl/objects/config.py | 25 +++++++++++++++---- contentctl/objects/detection_tags.py | 1 + contentctl/objects/investigation.py | 1 + contentctl/objects/mitre_attack_enrichment.py | 1 + contentctl/objects/ssa_detection.py | 1 + contentctl/objects/story_tags.py | 2 ++ 8 files changed, 28 insertions(+), 5 deletions(-) diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index fde24f6e..86a45a00 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -48,6 +48,7 @@ } +# TODO (#257): disable the use_enum_values configuration class Detection_Abstract(SecurityContentObject): model_config = ConfigDict(use_enum_values=True) diff --git a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py index d257a515..f23a3ace 100644 --- a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py @@ -29,6 +29,7 @@ NO_FILE_NAME = "NO_FILE_NAME" +# TODO (#257): disable the use_enum_values configuration class SecurityContentObject_Abstract(BaseModel, abc.ABC): model_config = ConfigDict(use_enum_values=True, validate_default=True) diff --git a/contentctl/objects/config.py b/contentctl/objects/config.py index 04c65871..278b653a 100644 --- a/contentctl/objects/config.py +++ b/contentctl/objects/config.py @@ -27,6 +27,8 @@ SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/{uid}/release/{version}/download" + +# TODO (#257): disable the use_enum_values configuration class App_Base(BaseModel,ABC): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) uid: Optional[int] = Field(default=None) @@ -51,6 +53,8 @@ def ensureAppPathExists(self, config:test, stage_file:bool=False): if not config.getLocalAppDir().exists(): config.getLocalAppDir().mkdir(parents=True) + +# TODO (#257): disable the use_enum_values configuration class TestApp(App_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) hardcoded_path: Optional[Union[FilePath,HttpUrl]] = Field(default=None, description="This may be a relative or absolute link to a file OR an HTTP URL linking to your app.") @@ -89,6 +93,8 @@ def getApp(self, 
config:test,stage_file:bool=False)->str: return str(destination) + +# TODO (#257): disable the use_enum_values configuration class CustomApp(App_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) # Fields required for app.conf based on @@ -149,6 +155,7 @@ def getApp(self, config:test, stage_file=True)->str: return str(destination) +# TODO (#257): disable the use_enum_values configuration class Config_Base(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) @@ -167,6 +174,7 @@ class init(Config_Base): pass +# TODO (#257): disable the use_enum_values configuration class validate(Config_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) enrichments: bool = Field(default=False, description="Enable MITRE, APP, and CVE Enrichments. "\ @@ -186,7 +194,7 @@ def getReportingPath(self)->pathlib.Path: return self.path/"reporting/" - +# TODO (#257): disable the use_enum_values configuration class build(validate): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) build_path: DirectoryPath = Field(default=DirectoryPath("dist/"), title="Target path for all build outputs") @@ -255,6 +263,7 @@ class new(Config_Base): type: NewContentType = Field(default=NewContentType.detection, description="Specify the type of content you would like to create.") +# TODO (#257): disable the use_enum_values configuration class deploy_acs(inspect): model_config = ConfigDict(use_enum_values=True,validate_default=False, arbitrary_types_allowed=True) #ignore linter error @@ -262,6 +271,7 @@ class deploy_acs(inspect): splunk_cloud_stack: str = Field(description="The name of your Splunk Cloud Stack") +# TODO (#257): disable the use_enum_values configuration class Infrastructure(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) splunk_app_username:str = Field(default="admin", description="Username for logging in to your Splunk Server") @@ -273,11 +283,13 @@ class Infrastructure(BaseModel): instance_name: str = Field(...) +# TODO (#257): disable the use_enum_values configuration class Container(Infrastructure): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) instance_address:str = Field(default="localhost", description="Address of your splunk server.") +# TODO (#257): disable the use_enum_values configuration class ContainerSettings(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) leave_running: bool = Field(default=True, description="Leave container running after it is first " @@ -302,11 +314,14 @@ class All(BaseModel): #Doesn't need any extra logic pass + +# TODO (#257): disable the use_enum_values configuration class Changes(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) target_branch:str = Field(...,description="The target branch to diff against. 
Note that this includes uncommitted changes in the working directory as well.") +# TODO (#257): disable the use_enum_values configuration class Selected(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) files:List[FilePath] = Field(...,description="List of detection files to test, separated by spaces.") @@ -679,10 +694,7 @@ def getModeName(self)->str: return DetectionTestingMode.selected.value - - - - +# TODO (#257): disable the use_enum_values configuration class test(test_common): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) container_settings:ContainerSettings = ContainerSettings() @@ -747,6 +759,9 @@ def getAppFilePath(self): TEST_ARGS_ENV = "CONTENTCTL_TEST_INFRASTRUCTURES" + + +# TODO (#257): disable the use_enum_values configuration class test_servers(test_common): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) test_instances:List[Infrastructure] = Field([],description="Test against one or more preconfigured servers.", validate_default=True) diff --git a/contentctl/objects/detection_tags.py b/contentctl/objects/detection_tags.py index 78c82430..448cbfb8 100644 --- a/contentctl/objects/detection_tags.py +++ b/contentctl/objects/detection_tags.py @@ -35,6 +35,7 @@ from contentctl.objects.atomic import AtomicTest +# TODO (#257): disable the use_enum_values configuration class DetectionTags(BaseModel): # detection spec model_config = ConfigDict(use_enum_values=True, validate_default=False) diff --git a/contentctl/objects/investigation.py b/contentctl/objects/investigation.py index 1ca980ea..1e7beb89 100644 --- a/contentctl/objects/investigation.py +++ b/contentctl/objects/investigation.py @@ -9,6 +9,7 @@ from contentctl.objects.investigation_tags import InvestigationTags +# TODO (#257): disable the use_enum_values configuration class Investigation(SecurityContentObject): model_config = ConfigDict(use_enum_values=True,validate_default=False) type: str = Field(...,pattern="^Investigation$") diff --git a/contentctl/objects/mitre_attack_enrichment.py b/contentctl/objects/mitre_attack_enrichment.py index 627b261f..44ca8e46 100644 --- a/contentctl/objects/mitre_attack_enrichment.py +++ b/contentctl/objects/mitre_attack_enrichment.py @@ -82,6 +82,7 @@ def standardize_contributors(cls, contributors:list[str] | None) -> list[str]: return [] return contributors +# TODO (#257): disable the use_enum_values configuration class MitreAttackEnrichment(BaseModel): ConfigDict(use_enum_values=True) mitre_attack_id: Annotated[str, Field(pattern=r"^T\d{4}(.\d{3})?$")] = Field(...) 
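
One aside on the `mitre_attack_id` field shown in context here: the dot in the pattern `^T\d{4}(.\d{3})?$` is unescaped, so any character (not just a literal period) can separate the technique and sub-technique numbers. A quick illustrative check using only the standard library:

```python
import re

# Pattern copied from MitreAttackEnrichment above; note the unescaped dot.
MITRE_ATTACK_ID_PATTERN = r"^T\d{4}(.\d{3})?$"

for candidate in ["T1059", "T1059.001", "T1059X001", "TA0002"]:
    print(candidate, bool(re.match(MITRE_ATTACK_ID_PATTERN, candidate)))
# T1059 True / T1059.001 True / T1059X001 True (unescaped dot) / TA0002 False
```
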
diff --git a/contentctl/objects/ssa_detection.py b/contentctl/objects/ssa_detection.py index 036f0b77..415215d4 100644 --- a/contentctl/objects/ssa_detection.py +++ b/contentctl/objects/ssa_detection.py @@ -59,6 +59,7 @@ class SSADetection(BaseModel): # raise ValueError('name is longer then 67 chars: ' + v) # return v + # TODO (#257): disable the use_enum_values configuration class Config: use_enum_values = True diff --git a/contentctl/objects/story_tags.py b/contentctl/objects/story_tags.py index c80a9d3b..ac19c5f7 100644 --- a/contentctl/objects/story_tags.py +++ b/contentctl/objects/story_tags.py @@ -17,6 +17,8 @@ class StoryUseCase(str,Enum): INSIDER_THREAT = "Insider Threat" OTHER = "Other" + +# TODO (#257): disable the use_enum_values configuration class StoryTags(BaseModel): model_config = ConfigDict(extra='forbid', use_enum_values=True) category: List[StoryCategory] = Field(...,min_length=1) From 05c308a0f016ad050d9745ac43f62686a34c4c79 Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Tue, 27 Aug 2024 09:39:20 -0700 Subject: [PATCH 14/18] Used the wrong issue number --- .../detection_abstract.py | 2 +- .../security_content_object_abstract.py | 2 +- contentctl/objects/config.py | 28 +++++++++---------- contentctl/objects/detection_tags.py | 2 +- contentctl/objects/investigation.py | 2 +- contentctl/objects/mitre_attack_enrichment.py | 2 +- contentctl/objects/ssa_detection.py | 2 +- contentctl/objects/story_tags.py | 2 +- 8 files changed, 21 insertions(+), 21 deletions(-) diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index 86a45a00..e9172f5f 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -48,7 +48,7 @@ } -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Detection_Abstract(SecurityContentObject): model_config = ConfigDict(use_enum_values=True) diff --git a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py index f23a3ace..40b3aedc 100644 --- a/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py @@ -29,7 +29,7 @@ NO_FILE_NAME = "NO_FILE_NAME" -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class SecurityContentObject_Abstract(BaseModel, abc.ABC): model_config = ConfigDict(use_enum_values=True, validate_default=True) diff --git a/contentctl/objects/config.py b/contentctl/objects/config.py index 278b653a..4ae52488 100644 --- a/contentctl/objects/config.py +++ b/contentctl/objects/config.py @@ -28,7 +28,7 @@ SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/{uid}/release/{version}/download" -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class App_Base(BaseModel,ABC): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) uid: Optional[int] = Field(default=None) @@ -54,7 +54,7 @@ def ensureAppPathExists(self, config:test, stage_file:bool=False): config.getLocalAppDir().mkdir(parents=True) -# TODO (#257): disable the use_enum_values 
configuration +# TODO (#266): disable the use_enum_values configuration class TestApp(App_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) hardcoded_path: Optional[Union[FilePath,HttpUrl]] = Field(default=None, description="This may be a relative or absolute link to a file OR an HTTP URL linking to your app.") @@ -94,7 +94,7 @@ def getApp(self, config:test,stage_file:bool=False)->str: return str(destination) -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class CustomApp(App_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) # Fields required for app.conf based on @@ -155,7 +155,7 @@ def getApp(self, config:test, stage_file=True)->str: return str(destination) -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Config_Base(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) @@ -174,7 +174,7 @@ class init(Config_Base): pass -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class validate(Config_Base): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) enrichments: bool = Field(default=False, description="Enable MITRE, APP, and CVE Enrichments. "\ @@ -194,7 +194,7 @@ def getReportingPath(self)->pathlib.Path: return self.path/"reporting/" -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class build(validate): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) build_path: DirectoryPath = Field(default=DirectoryPath("dist/"), title="Target path for all build outputs") @@ -263,7 +263,7 @@ class new(Config_Base): type: NewContentType = Field(default=NewContentType.detection, description="Specify the type of content you would like to create.") -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class deploy_acs(inspect): model_config = ConfigDict(use_enum_values=True,validate_default=False, arbitrary_types_allowed=True) #ignore linter error @@ -271,7 +271,7 @@ class deploy_acs(inspect): splunk_cloud_stack: str = Field(description="The name of your Splunk Cloud Stack") -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Infrastructure(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) splunk_app_username:str = Field(default="admin", description="Username for logging in to your Splunk Server") @@ -283,13 +283,13 @@ class Infrastructure(BaseModel): instance_name: str = Field(...) 
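
For context on these #266 TODOs: with `use_enum_values` enabled, Pydantic stores the enum member's value (a plain `str`) on the model rather than the enum itself, which is why the series compares fields against `.value` and carries `# type: ignore` markers. A minimal illustration, assuming Pydantic v2 (the two demo models are invented for this example):

```python
from enum import Enum

from pydantic import BaseModel, ConfigDict


class DetectionStatus(str, Enum):
    production = "production"
    deprecated = "deprecated"


class WithEnumValues(BaseModel):
    model_config = ConfigDict(use_enum_values=True)
    status: DetectionStatus


class WithoutEnumValues(BaseModel):
    status: DetectionStatus


# use_enum_values coerces the field to "production" (a str) after validation,
# so comparisons must use DetectionStatus.production.value, not the enum member.
print(type(WithEnumValues(status=DetectionStatus.production).status))     # <class 'str'>
print(type(WithoutEnumValues(status=DetectionStatus.production).status))  # <enum 'DetectionStatus'>
```
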
-# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Container(Infrastructure): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) instance_address:str = Field(default="localhost", description="Address of your splunk server.") -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class ContainerSettings(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) leave_running: bool = Field(default=True, description="Leave container running after it is first " @@ -315,13 +315,13 @@ class All(BaseModel): pass -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Changes(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) target_branch:str = Field(...,description="The target branch to diff against. Note that this includes uncommitted changes in the working directory as well.") -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Selected(BaseModel): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) files:List[FilePath] = Field(...,description="List of detection files to test, separated by spaces.") @@ -694,7 +694,7 @@ def getModeName(self)->str: return DetectionTestingMode.selected.value -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class test(test_common): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) container_settings:ContainerSettings = ContainerSettings() @@ -761,7 +761,7 @@ def getAppFilePath(self): TEST_ARGS_ENV = "CONTENTCTL_TEST_INFRASTRUCTURES" -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class test_servers(test_common): model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True) test_instances:List[Infrastructure] = Field([],description="Test against one or more preconfigured servers.", validate_default=True) diff --git a/contentctl/objects/detection_tags.py b/contentctl/objects/detection_tags.py index 448cbfb8..452eaa1e 100644 --- a/contentctl/objects/detection_tags.py +++ b/contentctl/objects/detection_tags.py @@ -35,7 +35,7 @@ from contentctl.objects.atomic import AtomicTest -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class DetectionTags(BaseModel): # detection spec model_config = ConfigDict(use_enum_values=True, validate_default=False) diff --git a/contentctl/objects/investigation.py b/contentctl/objects/investigation.py index 1e7beb89..81eb5460 100644 --- a/contentctl/objects/investigation.py +++ b/contentctl/objects/investigation.py @@ -9,7 +9,7 @@ from contentctl.objects.investigation_tags import InvestigationTags -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class Investigation(SecurityContentObject): model_config = ConfigDict(use_enum_values=True,validate_default=False) type: str = Field(...,pattern="^Investigation$") diff --git a/contentctl/objects/mitre_attack_enrichment.py b/contentctl/objects/mitre_attack_enrichment.py index 
44ca8e46..ca5b5714 100644 --- a/contentctl/objects/mitre_attack_enrichment.py +++ b/contentctl/objects/mitre_attack_enrichment.py @@ -82,7 +82,7 @@ def standardize_contributors(cls, contributors:list[str] | None) -> list[str]: return [] return contributors -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class MitreAttackEnrichment(BaseModel): ConfigDict(use_enum_values=True) mitre_attack_id: Annotated[str, Field(pattern=r"^T\d{4}(.\d{3})?$")] = Field(...) diff --git a/contentctl/objects/ssa_detection.py b/contentctl/objects/ssa_detection.py index 415215d4..15bd256c 100644 --- a/contentctl/objects/ssa_detection.py +++ b/contentctl/objects/ssa_detection.py @@ -59,7 +59,7 @@ class SSADetection(BaseModel): # raise ValueError('name is longer then 67 chars: ' + v) # return v - # TODO (#257): disable the use_enum_values configuration + # TODO (#266): disable the use_enum_values configuration class Config: use_enum_values = True diff --git a/contentctl/objects/story_tags.py b/contentctl/objects/story_tags.py index ac19c5f7..43e733c0 100644 --- a/contentctl/objects/story_tags.py +++ b/contentctl/objects/story_tags.py @@ -18,7 +18,7 @@ class StoryUseCase(str,Enum): OTHER = "Other" -# TODO (#257): disable the use_enum_values configuration +# TODO (#266): disable the use_enum_values configuration class StoryTags(BaseModel): model_config = ConfigDict(extra='forbid', use_enum_values=True) category: List[StoryCategory] = Field(...,min_length=1) From 0a9c4458a3a25d5112e1e4adeb80353139fff9ba Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Tue, 27 Aug 2024 11:28:22 -0700 Subject: [PATCH 15/18] removing other_skip count; reformatting CLI output --- .../views/DetectionTestingView.py | 1 - contentctl/actions/test.py | 16 ++++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py index 4d782127..d0be5bcb 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingView.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py @@ -189,7 +189,6 @@ def getSummaryObject( "total_experimental": total_experimental, "total_deprecated": total_deprecated, "total_manual": total_manual, - "total_other_skips": total_skipped - total_deprecated - total_experimental - total_manual, "success_rate": success_rate, }, "tested_detections": tested_detections, diff --git a/contentctl/actions/test.py b/contentctl/actions/test.py index 0a377894..0c72504c 100644 --- a/contentctl/actions/test.py +++ b/contentctl/actions/test.py @@ -117,21 +117,29 @@ def execute(self, input_dto: TestInputDto) -> bool: f"\tSkipped Detections : {summary.get('total_skipped','ERROR')}" ) print( - f"\t Experimental Detections : {summary.get('total_experimental','ERROR')}" + "\tProduction Status :" ) print( - f"\t Deprecated Detections : {summary.get('total_deprecated','ERROR')}" + f"\t Production Detections : {summary.get('total_production','ERROR')}" + ) + print( + f"\t Experimental Detections : {summary.get('total_experimental','ERROR')}" ) print( - f"\t Manually Tested Detections : {summary.get('total_manual','ERROR')}" + f"\t Deprecated Detections : {summary.get('total_deprecated','ERROR')}" ) print( - f"\t Other Skipped Detections : {summary.get('total_other_skips','ERROR')}" + f"\tManually Tested Detections : {summary.get('total_manual','ERROR')}" ) print( f"\tUntested Detections : 
{summary.get('total_untested','ERROR')}" ) print(f"\tTest Results File : {file.getOutputFilePath()}") + print( + "\nNOTE: skipped detections include non-production, manually tested, and certain\n" + "detection types (e.g. Correlation), but there may be overlap between these\n" + "categories." + ) return summary_results.get("summary", {}).get("success", False) except Exception as e: From 8f0f20d6579dcf426516b9257de0b39b6df41201 Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Tue, 27 Aug 2024 12:15:07 -0700 Subject: [PATCH 16/18] TODOs for #267 --- .../actions/detection_testing/views/DetectionTestingView.py | 2 ++ .../detection_testing/views/DetectionTestingViewCLI.py | 1 + .../abstract_security_content_objects/detection_abstract.py | 6 +++--- contentctl/objects/base_test_result.py | 1 + 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/contentctl/actions/detection_testing/views/DetectionTestingView.py b/contentctl/actions/detection_testing/views/DetectionTestingView.py index d0be5bcb..8ff6e583 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingView.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingView.py @@ -143,6 +143,8 @@ def getSummaryObject( ) ) + # TODO (#267): Align test reporting more closely w/ status enums (as it relates to + # "untested") # Aggregate summaries for the untested detections (anything still in the input queue was untested) total_untested = len(self.sync_obj.inputQueue) untested_detections: list[dict[str, Any]] = [] diff --git a/contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py b/contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py index c8a8bd09..1675fce4 100644 --- a/contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +++ b/contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py @@ -45,6 +45,7 @@ def setup(self): self.showStatus() + # TODO (#267): Align test reporting more closely w/ status enums (as it relates to "untested") def showStatus(self, interval: int = 1): while True: diff --git a/contentctl/objects/abstract_security_content_objects/detection_abstract.py b/contentctl/objects/abstract_security_content_objects/detection_abstract.py index e9172f5f..25a7fd9f 100644 --- a/contentctl/objects/abstract_security_content_objects/detection_abstract.py +++ b/contentctl/objects/abstract_security_content_objects/detection_abstract.py @@ -43,7 +43,7 @@ MISSING_SOURCES: set[str] = set() # Those AnalyticsTypes that we do not test via contentctl -UNTESTED_ANALYTICS_TYPES: set[str] = { +SKIPPED_ANALYTICS_TYPES: set[str] = { AnalyticsType.Correlation.value } @@ -177,7 +177,7 @@ def adjust_tests_and_groups(self) -> None: self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})") # Skip tests for detecton types like Correlation which are not supported via contentctl - if self.type in UNTESTED_ANALYTICS_TYPES: + if self.type in SKIPPED_ANALYTICS_TYPES: self.skip_all_tests( f"TEST SKIPPED: Detection type {self.type} cannot be tested by contentctl" ) @@ -738,7 +738,7 @@ def tests_validate( # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined # types, requires them. 
Accordingly, we do not need to do additional checks if the type is # Correlation - if info.data.get("type", "") in UNTESTED_ANALYTICS_TYPES: + if info.data.get("type", "") in SKIPPED_ANALYTICS_TYPES: return v # Manually tested detections are not required to have tests defined diff --git a/contentctl/objects/base_test_result.py b/contentctl/objects/base_test_result.py index 6a0e629a..1e1b287c 100644 --- a/contentctl/objects/base_test_result.py +++ b/contentctl/objects/base_test_result.py @@ -7,6 +7,7 @@ from contentctl.helper.utils import Utils +# TODO (#267): Align test reporting more closely w/ status enums (as it relates to "untested") # TODO (PEX-432): add status "UNSET" so that we can make sure the result is always of this enum # type; remove mypy ignores associated w/ these typing issues once we do class TestResultStatus(str, Enum): From fd2261b5a1efcfca5c57ee9def4dd9c1beee8531 Mon Sep 17 00:00:00 2001 From: Casey McGinley Date: Tue, 27 Aug 2024 12:27:34 -0700 Subject: [PATCH 17/18] TODOs for #268 --- contentctl/objects/detection_tags.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contentctl/objects/detection_tags.py b/contentctl/objects/detection_tags.py index 452eaa1e..f6f66930 100644 --- a/contentctl/objects/detection_tags.py +++ b/contentctl/objects/detection_tags.py @@ -107,6 +107,8 @@ def cis20(self) -> list[Cis18Value]: # TODO (#221): mappings should be fleshed out into a proper class mappings: Optional[List] = None # annotations: Optional[dict] = None + + # TODO (#268): Validate manual_test has length > 0 if not None manual_test: Optional[str] = None # The following validator is temporarily disabled pending further discussions From 09170dd5f1bdcb7703f9909b2be57e9e64d25b40 Mon Sep 17 00:00:00 2001 From: pyth0n1c <87383215+pyth0n1c@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:37:49 -0700 Subject: [PATCH 18/18] Update pyproject.toml Bump version in prep for release --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a9cd3ad3..0d4cc2dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "contentctl" -version = "4.3.2" +version = "4.3.3" description = "Splunk Content Control Tool" authors = ["STRT "] license = "Apache 2.0"
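
Taken together, the series leaves the test summary with a flat `summary` block plus `tested_detections`, `skipped_detections`, and `untested_detections` lists. Below is a hedged sketch of consuming that file in CI, mirroring the success gate in `Test.execute`; the `dist/summary.yml` path is an assumption (the actual location comes from the view's configuration), and the key names are taken from the diffs above:

```python
import yaml

# Load the summary emitted by DetectionTestingViewFile.stop()
with open("dist/summary.yml") as f:  # hypothetical path; see getOutputFilePath()
    results = yaml.safe_load(f)

summary = results.get("summary", {})
print(f"mode={summary.get('mode')}, success_rate={summary.get('success_rate')}")
print(f"skipped={summary.get('total_skipped')}, untested={summary.get('total_untested')}")

# Overall success is False if any test failed or any detection went untested
if not summary.get("success", False):
    raise SystemExit(1)
```
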