diff --git a/.gitignore b/.gitignore index e008f92..beb52b0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .venv* snippits.py -vpc_samples/ \ No newline at end of file +vpc_samples/ +*pyc \ No newline at end of file diff --git a/lambda_functions/PayloadCreator/PayloadCreatorLambda-h4v5b6lMGO1f-6fa45f3b-0f8b-4e4f-bfbb-89d1238ed13a.zip b/lambda_functions/PayloadCreator/PayloadCreatorLambda-h4v5b6lMGO1f-6fa45f3b-0f8b-4e4f-bfbb-89d1238ed13a.zip new file mode 100644 index 0000000..3921b2a Binary files /dev/null and b/lambda_functions/PayloadCreator/PayloadCreatorLambda-h4v5b6lMGO1f-6fa45f3b-0f8b-4e4f-bfbb-89d1238ed13a.zip differ diff --git a/lambda_functions/PayloadCreator/lambda_function.py b/lambda_functions/PayloadCreator/lambda_function.py new file mode 100644 index 0000000..9cc22ab --- /dev/null +++ b/lambda_functions/PayloadCreator/lambda_function.py @@ -0,0 +1,30 @@ +from datetime import datetime, timedelta, date + +def lambda_handler(event, context): + print(event.keys()) + if 'generatedDate' in event.keys(): + outputFileArray = event['outputFileArray'] + outputCsv = event['outputCsv'] + outputFileArray.append(outputCsv) + outputRows = event['outputRows'] + + queryParams = event['queryParams'] + + queryParams['queryOffset'] += queryParams['queryLimit'] + + payload_dict = { + 'queryParams': queryParams, + 'outputFileArray': outputFileArray, + 'outputRows': outputRows + } + else: + print("date to be added to output") + date_yst = str(date.today() - timedelta(1)) + payload_dict = { + 'date': date_yst, + 'day': date_yst.split("-")[2], + 'month': date_yst.split("-")[1], + 'year': date_yst.split("-")[0] + } + + return payload_dict \ No newline at end of file diff --git a/scripts/flow_logs_parser.py b/scripts/flow_logs_parser.py index ebd1011..7ae51ed 100644 --- a/scripts/flow_logs_parser.py +++ b/scripts/flow_logs_parser.py @@ -13,6 +13,8 @@ from hashlib import sha1 from functools import lru_cache from copy import deepcopy +import functools +import 
statistics args = getResolvedOptions(sys.argv, [ @@ -24,7 +26,8 @@ 'DynamoTableName', 'SGARulesUseIndex', 'SGSortTableName', - 'path' + 'path', + 'outputCsv' ]) s3 = boto3.resource('s3', args['region']) @@ -38,10 +41,32 @@ sg_analysis_rules_use_idx= args["SGARulesUseIndex"] sg_sort_table= args["SGSortTableName"] athena_s3_prefix = args['path'] +query_csv = args['outputCsv'] date_yst = (date.today() - timedelta(1)) my_bucket = s3.Bucket(flow_logs_athena_results_bucket) +sg_rule_id_query_results = [] +get_sg_ref_ips_results = [] +rule_matcher_results = [] +security_group_rule_parser_results = [] +get_sg_rule_id_results = [] +get_interface_ddb_results = [] + +def timer(timer_results): + def timer_decorator(func): + @functools.wraps(func) + def wrapper_timer(*args, **kwargs): + tic = time.perf_counter() + value = func(*args, **kwargs) + toc = time.perf_counter() + elapsed_time = toc - tic + timer_results.append(elapsed_time) + print(f"Elapsed time: {elapsed_time:0.4f} seconds") + return value + return wrapper_timer + return timer_decorator + def network_test(rule_block,flow_addr): net = IPv4Network(rule_block) addr = IPv4Address(flow_addr) @@ -71,6 +96,7 @@ def rule_filter(resp_list): cidr_rules = [r for r in resp_list if r['properties'].get('CidrIpv4')] return (ref_rules,cidr_rules) +@timer(timer_results=get_sg_ref_ips_results) @lru_cache(maxsize=32) def get_sg_ref_ips(sg_id): deserialize = TypeDeserializer() @@ -96,7 +122,7 @@ def port_test(rule_port_from,rule_port_to,flow_port): return True else: return False - +@timer(timer_results=rule_matcher_results) def rule_matcher(resp_list,flow): [r.update({'match_score':1}) for r in resp_list] if len(resp_list) == 1: @@ -120,42 +146,57 @@ def rule_matcher(resp_list,flow): return max_score_list -def get_sg_rule_id(sg_id, flow_count, protocol, flow_dir, addr, dstport): + +@timer(timer_results=sg_rule_id_query_results) +@lru_cache(maxsize=2048) +def get_sg_rule_id_dynamo_query(sg_id): + response=dynamodb.query( + 
TableName=sg_rules_tbl_name, + IndexName=sg_rules_group_idx, + KeyConditions={ + "group_id":{ + 'ComparisonOperator': 'EQ', + 'AttributeValueList': [ {"S": sg_id} ] + } + } + ) + + return response + +@timer(timer_results=security_group_rule_parser_results) +def security_group_rule_parser(response, flow_dir): deserializer = TypeDeserializer() + if flow_dir == 'egress': + resp_list = [{k: deserializer.deserialize(v) for k, v in r.items()} for r in response['Items'] if r['properties']['M']['IsEgress']['BOOL'] == True] + else: + resp_list = [{k: deserializer.deserialize(v) for k, v in r.items()} for r in response['Items'] if r['properties']['M']['IsEgress']['BOOL'] == False] + return resp_list + +@timer(timer_results=get_sg_rule_id_results) +def get_sg_rule_id(sg_id, flow_count, protocol, flow_dir, addr, dstport): try: - - response=dynamodb.query( - TableName=sg_rules_tbl_name, - IndexName=sg_rules_group_idx, - KeyConditions={ - "group_id":{ - 'ComparisonOperator': 'EQ', - 'AttributeValueList': [ {"S": sg_id} ] - } - } - ) + response = get_sg_rule_id_dynamo_query(sg_id) + flow_object = { 'flow_count': flow_count, 'addr': addr, 'port': dstport, 'protocol': protocol, } - if flow_dir == 'egress': - resp_list = [{k: deserializer.deserialize(v) for k, v in r.items()} for r in response['Items'] if r['properties']['M']['IsEgress']['BOOL'] == True] - else: - resp_list = [{k: deserializer.deserialize(v) for k, v in r.items()} for r in response['Items'] if r['properties']['M']['IsEgress']['BOOL'] == False] - - try: - result = rule_matcher(resp_list,flow_object)[0] - print(f"rule found for flow: sg_rule_id={result['id']},sg_id={result['group_id']},flow_dir={flow_dir},protocol={flow_object['protocol']},addr={flow_object['addr']},dstport={flow_object['port']}") - insert_usage_data(sg_rule_id=result['id'],sg_id=result['group_id'],flow_dir=flow_dir,**flow_object) - except Exception as e: - print(f'no rule found for flow:{flow_object} - {flow_dir}') - print(f'error: {e}') - # raise e 
- except Exception as e: print("There was an error while trying to perform DynamoDB get operation on Rules table: "+str(e)) + + resp_list = security_group_rule_parser(response, flow_dir) + + try: + result = rule_matcher(resp_list,flow_object)[0] + print(f"rule found for flow: sg_rule_id={result['id']},sg_id={result['group_id']},flow_dir={flow_dir},protocol={flow_object['protocol']},addr={flow_object['addr']},dstport={flow_object['port']}") + insert_usage_data(sg_rule_id=result['id'],sg_id=result['group_id'],flow_dir=flow_dir,**flow_object) + except Exception as e: + print(f'no rule found for flow:{flow_object} - {flow_dir}') + print(f'error: {e}') + # raise e + def insert_usage_data(sg_rule_id, sg_id, flow_dir, flow_count, addr, port, protocol): addr_rule_hash = [sg_rule_id,addr,port,protocol] @@ -198,6 +239,9 @@ def insert_usage_data(sg_rule_id, sg_id, flow_dir, flow_count, addr, port, proto print("There was an error while trying to perform DynamoDB insert operation on Usage table: "+str(e)) # raise e + +@timer(timer_results=get_interface_ddb_results) +@lru_cache(maxsize=1024) def get_interface_ddb(id:str) -> dict: deserialize = TypeDeserializer() response = dynamodb.get_item( @@ -211,11 +255,13 @@ def get_interface_ddb(id:str) -> dict: print (f'nic id: {id} not found!') + + def main(): - s3_folder_path = f's3://{flow_logs_athena_results_bucket}/{athena_s3_prefix}/{date_yst.isoformat().replace("-","/")}/' + query_csv_path = f's3://{flow_logs_athena_results_bucket}/{athena_s3_prefix}/{date_yst.isoformat().replace("-","/")}/{query_csv}' start = time.time() print("Writing rules data to DynamoDB table- started at: "+str(datetime.now())) - dfs = wr.s3.read_csv(path=s3_folder_path, chunksize=1000, encoding = 'ISO-8859-1') + dfs = wr.s3.read_csv(path=query_csv_path, chunksize=1000, encoding = 'ISO-8859-1') for df in dfs: try: df_row_count = len(df) - 1 @@ -224,7 +270,7 @@ def main(): print(f'processing row {index} of {df_row_count}') if row is not None and 'dstport' in 
row: nw_int_info = get_interface_ddb(id=row['interface_id']) - + for grp in nw_int_info['security_group_ids']: print(grp, row['flow_count'], row['protocol'],row['flow_direction'],row['addr'],row['dstport']) get_sg_rule_id(grp, row['flow_count'], row['protocol'],row['flow_direction'],row['addr'],row['dstport']) @@ -234,6 +280,25 @@ def main(): print(f'error: {e}') # raise e + #print(f'sg_rule_id_query cache stats') + #print(get_sg_rule_id_dynamo_query.cache_info()) + print(f'quantiles for sg_rule_id_query: {statistics.quantiles(sg_rule_id_query_results)}') + + #print(f'get_sg_ref_ips cache stats') + #print(get_sg_ref_ips.cache_info()) + print(f'quantiles for get_sg_ref_ips: {statistics.quantiles(get_sg_ref_ips_results)}') + + print(f'quantiles for rule_matcher: {statistics.quantiles(rule_matcher_results)}') + + #print(f'security_group_rule_parser cache stats') + #print(security_group_rule_parser.cache_info()) + print(f'quantiles for security_group_rule_parser: {statistics.quantiles(security_group_rule_parser_results)}') + + print(f'quantiles for get_sg_rule_id: {statistics.quantiles(get_sg_rule_id_results)}') + + print(f'quantiles for get_interface_ddb: {statistics.quantiles(get_interface_ddb_results)}') + + print("Writing rules data to DynamoDB table- completed at: "+str(datetime.now())) end = time.time() print("Total time taken in minutes: "+str((end - start)/60)) diff --git a/scripts/query_athena.py b/scripts/query_athena.py index 47331cc..4141896 100644 --- a/scripts/query_athena.py +++ b/scripts/query_athena.py @@ -27,11 +27,11 @@ date_yst = (date.today() - timedelta(1)) if date_yst.day > 9: params['query'] = f""" - select count("interface_id") as flow_count, interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr") as addr, dstport FROM {params['database']}.\"{params['table']}\" WHERE dstport is not null and day='{date_yst.day}' and action='ACCEPT' group by interface_id, protocol, flow_direction, 
if("flow_direction"='ingress',"srcaddr","dstaddr"), dstport, dstport having count(interface_id) > 5 order by flow_count desc limit 60000 + select count("interface_id") as flow_count, interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr") as addr, dstport FROM {params['database']}.\"{params['table']}\" WHERE dstport is not null and day='{date_yst.day}' and action='ACCEPT' group by interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr"), dstport, dstport having count(interface_id) > 5 order by interface_id, flow_count desc limit 10000 """ else: params['query'] = f""" - select count("interface_id") as flow_count, interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr") as addr, dstport FROM {params['database']}.\"{params['table']}\" WHERE dstport is not null and day='0{date_yst.day}' and action='ACCEPT' group by interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr"), dstport, dstport having count(interface_id) > 5 order by flow_count desc limit 60000 + select count("interface_id") as flow_count, interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr") as addr, dstport FROM {params['database']}.\"{params['table']}\" WHERE dstport is not null and day='0{date_yst.day}' and action='ACCEPT' group by interface_id, protocol, flow_direction, if("flow_direction"='ingress',"srcaddr","dstaddr"), dstport, dstport having count(interface_id) > 5 order by interface_id, flow_count desc limit 10000 """ session = boto3.Session() diff --git a/templates/athena_integration_account_glue_table.yaml b/templates/athena_integration_account_glue_table.yaml index 4bee91f..5cee610 100644 --- a/templates/athena_integration_account_glue_table.yaml +++ b/templates/athena_integration_account_glue_table.yaml @@ -232,7 +232,7 @@ Resources: Handler: 'index.handler' Timeout: 60 - Runtime: nodejs18.x + Runtime: nodejs14.x 
ReservedConcurrentExecutions: 1 Role: !GetAtt VpcFlowLogsTableIntegrationLambdaExecutorRole.Arn diff --git a/templates/sg_rule_analysis.yaml b/templates/sg_rule_analysis.yaml index 6262d20..6c4a1e2 100644 --- a/templates/sg_rule_analysis.yaml +++ b/templates/sg_rule_analysis.yaml @@ -34,9 +34,19 @@ Resources: Action: sts:AssumeRole Path: / ManagedPolicyArns: - - arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole - !Ref SgaCloudWatchLogsPolicy Policies: + - PolicyName: SgaSfGlueJobPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - glue:StartJobRun + - glue:GetJobRun + - glue:GetJobRuns + - glue:BatchStopJobRun + Resource: "*" - PolicyName: SgaSfLambdaInvokePolicy PolicyDocument: Version: '2012-10-17' @@ -48,6 +58,7 @@ Resources: - !GetAtt SgaGetSgLambda.Arn - !GetAtt SgaGetEniLambda.Arn - !GetAtt SgaSortSGLambda.Arn + - !GetAtt PayloadCreatorLambda.Arn - PolicyName: SgaSfCwlStepFunctions PolicyDocument: Version: '2012-10-17' @@ -63,6 +74,39 @@ Resources: - logs:DescribeResourcePolicies - logs:DescribeLogGroups Resource: "*" + - PolicyName: StepFunctionAthenaPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - athena:StartQueryExecution + - athena:GetQueryRuntimeStatistics + - athena:GetQueryExecution + - athena:GetQueryResults + - glue:GetTable + - glue:GetPartitions + Resource: "*" + - PolicyName: StepFunctionS3Access + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - s3:Get* + - s3:List* + - s3:PutObject + Resource: + - !ImportValue sga-athena-bucket-arn + - !Join + - '' + - - !ImportValue sga-athena-bucket-arn + - "/*" + - !Ref SgaVpcFlowLogBucket + - !Join + - '' + - - !Ref SgaVpcFlowLogBucket + - "/*" SgaEventBridgeIAMManagedPolicy: @@ -172,7 +216,8 @@ Resources: StepFunctionsStateMachine: Type: "AWS::StepFunctions::StateMachine" Properties: - DefinitionString: !Sub | + DefinitionString: !Sub + - | { "StartAt": "ProcessItems", "States": { 
@@ -180,8 +225,19 @@ Resources: "Type": "Map", "ItemsPath": "$.data", "Iterator": { - "StartAt": "GetSG", + "StartAt": "SkipLambdaExecution", "States": { + "SkipLambdaExecution": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.skipLambda", + "BooleanEquals": true, + "Next": "DateGenerator" + } + ], + "Default": "GetSG" + }, "GetSG": { "Type": "Task", "Resource": "${SgaGetSgLambda.Arn}", @@ -239,37 +295,162 @@ Resources: "BackoffRate": 2 } ], - "Next": "QueryVPCFlowLogsSaveToS3", + "Next": "DateGenerator", "ResultPath": null }, - "QueryVPCFlowLogsSaveToS3": { + "DateGenerator": { "Type": "Task", - "Resource": "arn:aws:states:::glue:startJobRun.sync", + "Resource": "arn:aws:states:::aws-sdk:lambda:invoke", "Parameters": { - "JobName": "${SgaStartAthenaQueryGlueJob}", - "Arguments": { - "--region.$": "$.region", - "--table.$": "$.table", - "--path.$": "$.path" + "FunctionName": "${PayloadCreatorLambda.Arn}", + "Payload.$": "$" + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 2, + "MaxAttempts": 6, + "BackoffRate": 2 } + ], + "Next": "QueryParameters", + "ResultPath": "$.generatedDate", + "ResultSelector": { + "date.$": "States.StringToJson($.Payload)" + } + }, + "QueryParameters": { + "Type": "Pass", + "Parameters": { + "queryParams": { + "queryString": "SELECT count(\"interface_id\") as flow_count, interface_id, protocol, flow_direction, if(\"flow_direction\"='ingress',\"srcaddr\",\"dstaddr\") as addr, dstport FROM \"vpcflowlogsathenadatabase\".\"{}\" WHERE dstport is not null and day='{}' and month='{}' and action='ACCEPT' group by interface_id, protocol, flow_direction, if(\"flow_direction\"='ingress',\"srcaddr\",\"dstaddr\"), dstport, dstport having count(interface_id) > 5 order by interface_id, flow_count desc offset {} limit {}", + "outputLocation": "s3://${AthenaBucket}/{}/{}/{}/{}/", + "queryOffset": 0, + 
"queryLimit": 40000 + }, + "outputFileArray": [], + "data.$": "$" }, - "Next": "ParseVPCFLowLogsSaveUsageCount", - "InputPath": "$", - "ResultPath": null + "Next": "Athena StartQueryExecution" }, - "ParseVPCFLowLogsSaveUsageCount": { + "Athena StartQueryExecution": { "Type": "Task", - "Resource": "arn:aws:states:::glue:startJobRun.sync", + "Resource": "arn:aws:states:::athena:startQueryExecution.sync", "Parameters": { - "JobName": "${SgaParseFlowLogsGlueJob}", - "Arguments": { - "--region.$": "$.region", - "--table.$": "$.table", - "--path.$": "$.path" + "QueryString.$": "States.Format($.queryParams.queryString, $.data.table, $.data.generatedDate.date.day, $.data.generatedDate.date.month, $.queryParams.queryOffset, $.queryParams.queryLimit)", + "WorkGroup": "primary", + "ResultConfiguration": { + "OutputLocation.$": "States.Format($.queryParams.outputLocation, $.data.path, $.data.generatedDate.date.year, $.data.generatedDate.date.month, $.data.generatedDate.date.day)" } }, - "InputPath": "$", - "End": true + "Next": "GetQueryRuntimeStatistics", + "ResultPath": "$.athenaQuery", + "ResultSelector": { + "queryExecutionId.$": "$.QueryExecution.QueryExecutionId", + "outputCsv.$": "States.Format('{}.csv',$.QueryExecution.QueryExecutionId)" + } + }, + "GetQueryRuntimeStatistics": { + "Type": "Task", + "Next": "PayloadCreator", + "Parameters": { + "QueryExecutionId.$": "$.athenaQuery.queryExecutionId" + }, + "Resource": "arn:aws:states:::aws-sdk:athena:getQueryRuntimeStatistics", + "ResultSelector": { + "outputRows.$": "$.QueryRuntimeStatistics.OutputStage.OutputRows" + }, + "ResultPath": "$.queryRows" + }, + "PayloadCreator": { + "Type": "Task", + "Resource": "arn:aws:states:::aws-sdk:lambda:invoke", + "Parameters": { + "FunctionName": "${PayloadCreatorLambda.Arn}", + "Payload": { + "outputCsv.$": "$.athenaQuery.outputCsv", + "queryParams.$": "$.queryParams", + "outputFileArray.$": "$.outputFileArray", + "outputRows.$": "$.queryRows.outputRows", + "generatedDate.$": 
"$.data.generatedDate" + } + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 2, + "MaxAttempts": 6, + "BackoffRate": 2 + } + ], + "Next": "PayloadFormatter", + "ResultSelector": { + "lambdaResult.$": "States.StringToJson($.Payload)" + }, + "ResultPath": "$.LambdaResult" + }, + "PayloadFormatter": { + "Type": "Pass", + "Next": "Choice", + "Parameters": { + "queryParams.$": "$.LambdaResult.lambdaResult.queryParams", + "data.$": "$.data", + "outputRows.$": "$.LambdaResult.lambdaResult.outputRows", + "outputFileArray.$": "$.LambdaResult.lambdaResult.outputFileArray", + "generatedDate.$": "$.data.generatedDate" + } + }, + "Choice": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.outputRows", + "NumericEqualsPath": "$.queryParams.queryLimit", + "Next": "Athena StartQueryExecution" + } + ], + "Default": "AnalysisMap" + }, + "AnalysisMap": { + "Type": "Map", + "ItemProcessor": { + "ProcessorConfig": { + "Mode": "INLINE" + }, + "StartAt": "VpcFlowLogParser", + "States": { + "VpcFlowLogParser": { + "Type": "Task", + "Resource": "arn:aws:states:::glue:startJobRun.sync", + "Parameters": { + "JobName": "${SgaParseFlowLogsGlueJob}", + "Arguments": { + "--region.$": "$.data.region", + "--outputCsv.$": "$.outputCsv", + "--path.$": "$.data.path" + } + }, + "InputPath": "$", + "End": true + } + } + }, + "End": true, + "ItemsPath": "$.outputFileArray", + "ItemSelector": { + "data.$": "$.data", + "outputCsv.$": "$$.Map.Item.Value" + } } } }, @@ -277,6 +458,7 @@ Resources: } } } + - AthenaBucket: !ImportValue sga-athena-bucket-ref RoleArn: !GetAtt SgaStepFunctionIAMRole.Arn StateMachineType: "STANDARD" LoggingConfiguration: @@ -308,7 +490,8 @@ Resources: "AccountNo": "012345678901", "region": "eu-west-2", "table": "SomeGlueTable", - "path": "path/path" + "path": "path/path", + "skipLambda": false } ] } @@ -484,7 +667,7 @@ Resources: 
Description: "Job to parse flow logs and calculate usage" Role: !GetAtt SgaParseFlowLogsGlueJobIAMRole.Arn ExecutionProperty: - MaxConcurrentRuns: 1 + MaxConcurrentRuns: 10 Command: Name: "pythonshell" ScriptLocation: !Join @@ -512,6 +695,7 @@ --NICInterfaceTable: !Ref DynamoTableENIAnalysis --DynamoTableName: !Ref DynamoTableSGAnalysis --SGARulesUseIndex: "addr-id-index" + --outputCsv: "outputcsv" --path: "vpcflowlogs" MaxRetries: 1 Timeout: 2880 @@ -693,4 +877,28 @@ Resources: Timeout: 900 Code: S3Bucket: !ImportValue sga-resources-bucket-ref - S3Key: SortSecurityGroupReferences-33408ae9-78dc-426b-b200-36363544cffc.zip \ No newline at end of file + S3Key: SortSecurityGroupReferences-33408ae9-78dc-426b-b200-36363544cffc.zip + + PayloadCreatorRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: sts:AssumeRole + Path: / + ManagedPolicyArns: + - !Ref SgaCloudWatchLogsPolicy + + PayloadCreatorLambda: + Type: "AWS::Lambda::Function" + Properties: + Role: !GetAtt PayloadCreatorRole.Arn + Handler: lambda_function.lambda_handler + Runtime: python3.10 + Code: + S3Bucket: !ImportValue sga-resources-bucket-ref + S3Key: PayloadCreatorLambda-h4v5b6lMGO1f-6fa45f3b-0f8b-4e4f-bfbb-89d1238ed13a.zip