diff --git a/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py b/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py
index 709d2c4a7f..6bdc980d57 100644
--- a/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py
+++ b/tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py
@@ -41,9 +41,36 @@ def __init__(self,
                       ],
                       webhook_triggers_batch_build=True)
 
+        # S3 bucket for testing internal fixes.
+        s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json("aws-lc-codebuild"))
+        ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO]))
+        ec2_inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy}
+        ec2_role = iam.Role(scope=self, id="{}-ec2-role".format(id),
+                            role_name="{}-ec2-role".format(id),
+                            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
+                            inline_policies=ec2_inline_policies,
+                            managed_policies=[
+                                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"),
+                                iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy")
+                            ])
+        iam.CfnInstanceProfile(scope=self, id="{}-ec2-profile".format(id),
+                               roles=[ec2_role.role_name],
+                               instance_profile_name="{}-ec2-profile".format(id))
+
+        # create vpc for ec2s
+        vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id))
+        selected_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)
+
+        # create security group with default rules
+        security_group = ec2.SecurityGroup(self, id="{}-ec2-sg".format(id),
+                                           allow_all_outbound=True,
+                                           vpc=vpc,
+                                           security_group_name='codebuild_ec2_sg')
+
         # Define a IAM role for this stack.
         code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id]))
-        ec2_policy = iam.PolicyDocument.from_json(ec2_policies_in_json())
+        ec2_policy = iam.PolicyDocument.from_json(ec2_policies_in_json(ec2_role.role_name, security_group.security_group_id, selected_subnets.subnets[0].subnet_id, vpc.vpc_id))
         ssm_policy = iam.PolicyDocument.from_json(ssm_policies_in_json())
         codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy,
                                      "ec2_policy": ec2_policy,
@@ -67,37 +94,22 @@ def __init__(self,
             environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL,
                                                    privileged=False,
                                                    build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
-            build_spec=BuildSpecLoader.load(spec_file_path))
+            build_spec=BuildSpecLoader.load(spec_file_path),
+            environment_variables={
+                "EC2_SECURITY_GROUP_ID": codebuild.BuildEnvironmentVariable(
+                    value=security_group.security_group_id
+                ),
+                "EC2_SUBNET_ID": codebuild.BuildEnvironmentVariable(
+                    value=selected_subnets.subnets[0].subnet_id
+                ),
+                "EC2_VPC_ID": codebuild.BuildEnvironmentVariable(
+                    value=vpc.vpc_id
+                ),
+            })
         project.enable_batch_builds()
 
         PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project)
 
-        # S3 bucket for testing internal fixes.
-        s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json("aws-lc-codebuild"))
-        ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO]))
-        ec2_inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy}
-        ec2_role = iam.Role(scope=self, id="{}-ec2-role".format(id),
-                            role_name="{}-ec2-role".format(id),
-                            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
-                            inline_policies=ec2_inline_policies,
-                            managed_policies=[
-                                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"),
-                                iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy")
-                            ])
-        iam.CfnInstanceProfile(scope=self, id="{}-ec2-profile".format(id),
-                               roles=[ec2_role.role_name],
-                               instance_profile_name="{}-ec2-profile".format(id))
-
-        # create vpc for ec2s
-        vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id))
-        selection = vpc.select_subnets()
-
-        # create security group with default rules
-        security_group = ec2.SecurityGroup(self, id="{}-ec2-sg".format(id),
-                                           allow_all_outbound=True,
-                                           vpc=vpc,
-                                           security_group_name='codebuild_ec2_sg')
-
         # Define logs for SSM.
         log_group_name = "{}-cw-logs".format(id)
         log_group = logs.CfnLogGroup(self, log_group_name,
diff --git a/tests/ci/cdk/cdk/bm_framework_stack.py b/tests/ci/cdk/cdk/bm_framework_stack.py
index 8a8feef005..d465ea2a27 100644
--- a/tests/ci/cdk/cdk/bm_framework_stack.py
+++ b/tests/ci/cdk/cdk/bm_framework_stack.py
@@ -5,14 +5,13 @@
 import boto3
 from botocore.exceptions import ClientError
-from aws_cdk import Duration, Stack, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_s3 as s3, aws_logs as logs
+from aws_cdk import Duration, Stack, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_logs as logs
 from constructs import Construct
 
 from cdk.components import PruneStaleGitHubBuilds
 from util.metadata import AWS_ACCOUNT, AWS_REGION, GITHUB_REPO_OWNER, GITHUB_REPO_NAME
-from util.iam_policies import code_build_batch_policy_in_json, s3_read_write_policy_in_json, \
-    ec2_bm_framework_policies_in_json, ssm_bm_framework_policies_in_json, s3_bm_framework_policies_in_json, \
-    ecr_power_user_policy_in_json
+from util.iam_policies import code_build_batch_policy_in_json, ec2_bm_framework_policies_in_json, \
+    ssm_bm_framework_policies_in_json, ecr_power_user_policy_in_json
 from util.build_spec_loader import BuildSpecLoader
 
 # detailed documentation can be found here: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html
@@ -28,8 +27,6 @@ def __init__(self,
         super().__init__(scope, id, **kwargs)
 
         # Define some variables that will be commonly used
-        S3_PROD_BUCKET = "{}-{}-prod-bucket".format(AWS_ACCOUNT, id)
-        S3_PR_BUCKET = "{}-{}-pr-bucket".format(AWS_ACCOUNT, id)
         CLOUDWATCH_LOGS = "{}-{}-cw-logs".format(AWS_ACCOUNT, id)
 
         # Define CodeBuild resource.
@@ -49,17 +46,9 @@ def __init__(self,
         code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id]))
         ec2_bm_framework_policy = iam.PolicyDocument.from_json(ec2_bm_framework_policies_in_json())
         ssm_bm_framework_policy = iam.PolicyDocument.from_json(ssm_bm_framework_policies_in_json())
-        s3_read_write_policy_prod_bucket = iam.PolicyDocument.from_json(s3_read_write_policy_in_json(S3_PROD_BUCKET))
-        s3_read_write_policy_pr_bucket = iam.PolicyDocument.from_json(s3_read_write_policy_in_json(S3_PR_BUCKET))
-        s3_bm_framework_policy_prod_bucket = iam.PolicyDocument.from_json(s3_bm_framework_policies_in_json(S3_PROD_BUCKET))
-        s3_bm_framework_policy_pr_bucket = iam.PolicyDocument.from_json(s3_bm_framework_policies_in_json(S3_PR_BUCKET))
         codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy,
                                      "ec2_bm_framework_policy": ec2_bm_framework_policy,
-                                     "ssm_bm_framework_policy": ssm_bm_framework_policy,
-                                     "s3_read_write_policy_prod_bucket": s3_read_write_policy_prod_bucket,
-                                     "s3_read_write_policy_pr_bucket": s3_read_write_policy_pr_bucket,
-                                     "s3_bm_framework_policy_prod_bucket": s3_bm_framework_policy_prod_bucket,
-                                     "s3_bm_framework_policy_pr_bucket": s3_bm_framework_policy_pr_bucket}
+                                     "ssm_bm_framework_policy": ssm_bm_framework_policy}
 
         codebuild_role = iam.Role(scope=self, id="{}-codebuild-role".format(id),
                                   assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
@@ -84,28 +73,6 @@ def __init__(self,
 
         PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project)
 
-        # use boto3 to determine if a bucket with the name that we want exists, and if it doesn't, create it
-        s3_res = boto3.resource('s3')
-        prod_bucket = s3_res.Bucket(S3_PROD_BUCKET)
-        pr_bucket = s3_res.Bucket(S3_PR_BUCKET)
-        try:
-            s3_res.meta.client.head_bucket(Bucket=prod_bucket.name)
-        except ClientError:
-            production_results_s3 = s3.Bucket(self, "{}-prod-bucket".format(id),
-                                              bucket_name=S3_PROD_BUCKET,
-                                              enforce_ssl=True)
-
-            production_results_s3.grant_put(codebuild_role)
-
-        try:
-            s3_res.meta.client.head_bucket(Bucket=pr_bucket.name)
-        except ClientError:
-            pr_results_s3 = s3.Bucket(self, "{}-pr-bucket".format(id),
-                                      bucket_name=S3_PR_BUCKET,
-                                      enforce_ssl=True)
-
-            pr_results_s3.grant_put(codebuild_role)
-
         # use boto3 to determine if a cloudwatch logs group with the name we want exists, and if it doesn't, create it
         logs_client = boto3.client('logs', region_name=AWS_REGION)
         try:
diff --git a/tests/ci/cdk/cdk/windows_docker_image_build_stack.py b/tests/ci/cdk/cdk/windows_docker_image_build_stack.py
index edabec930c..97009dfdd6 100644
--- a/tests/ci/cdk/cdk/windows_docker_image_build_stack.py
+++ b/tests/ci/cdk/cdk/windows_docker_image_build_stack.py
@@ -47,6 +47,8 @@ def __init__(self,
                           ])
 
         # Define Windows EC2 instance, where the SSM document will be executed.
+        # TODO: This AMI no longer has Docker installed by default. Find another Windows AMI that
+        # includes Docker, or update the SSM document to install Docker itself.
         machine_image = ec2.MachineImage.latest_windows(
             ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE)
         vpc = ec2.Vpc(scope=self, id="{}-vpc".format(id))
diff --git a/tests/ci/cdk/run-cdk.sh b/tests/ci/cdk/run-cdk.sh
index eb069f2bcd..bfb9b153e0 100755
--- a/tests/ci/cdk/run-cdk.sh
+++ b/tests/ci/cdk/run-cdk.sh
@@ -67,8 +67,10 @@ function create_linux_docker_img_build_stack() {
  destroy_docker_img_build_stack
 
  # Deploy aws-lc ci stacks.
  # When repeatedly deploy, error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
-  # https://forums.aws.amazon.com/thread.jspa?messageID=952368
-  # Workaround: go to AWS EIP console, release unused IP.
+  #
+  # Workaround: The default quota is 5 EIP addresses. Go to
+  # https://us-west-2.console.aws.amazon.com/servicequotas/home/services/ec2/quotas and request a quota
+  # increase for "EC2-VPC Elastic IPs".
   cdk deploy aws-lc-docker-image-build-linux --require-approval never
 }
 
@@ -77,8 +79,10 @@ function create_win_docker_img_build_stack() {
   destroy_docker_img_build_stack
 
   # Deploy aws-lc ci stacks.
   # When repeatedly deploy, error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
-  # https://forums.aws.amazon.com/thread.jspa?messageID=952368
-  # Workaround: go to AWS EIP console, release unused IP.
+  #
+  # Workaround: The default quota is 5 EIP addresses. Go to
+  # https://us-west-2.console.aws.amazon.com/servicequotas/home/services/ec2/quotas and request a quota
+  # increase for "EC2-VPC Elastic IPs".
   cdk deploy aws-lc-docker-image-build-windows --require-approval never
 }
 
diff --git a/tests/ci/cdk/util/iam_policies.py b/tests/ci/cdk/util/iam_policies.py
index 690047be41..ee71112c05 100644
--- a/tests/ci/cdk/util/iam_policies.py
+++ b/tests/ci/cdk/util/iam_policies.py
@@ -5,7 +5,7 @@
 from util.metadata import AWS_REGION, AWS_ACCOUNT
 
 
-def ec2_policies_in_json():
+def ec2_policies_in_json(ec2_role_name, ec2_security_group_id, ec2_subnet_id, ec2_vpc_id):
     """
     Define an IAM policy that gives permissions for starting, stopping, and getting details of EC2 instances and their Vpcs
     :return: an IAM policy statement in json.
@@ -16,36 +16,21 @@
         {
             "Effect": "Allow",
             "Action": [
+                "iam:PassRole",
                 "ec2:RunInstances",
                 "ec2:TerminateInstances",
                 "ec2:CreateTags",
                 "ec2:DescribeInstances",
-                "ec2:DescribeVpcs",
-                "ec2:DescribeSecurityGroups",
-                "ec2:DescribeSubnets"
             ],
             "Resource": [
-                "*"
-            ]
-        }]
-    }
-
-def s3_bm_framework_policies_in_json(s3_bucket_name):
-    """
-    Define an IAM policy that gives some s3 permissions needed by the EC2 instances of the benchmarking framework
-    """
-    return {
-        "Version": "2012-10-17",
-        "Statement": [
-        {
-            "Effect": "Allow",
-            "Action": [
-                "s3:ListBucket",
-                "s3:DeleteObject"
-            ],
-            "Resource": [
-                "arn:aws:s3:::{}".format(s3_bucket_name),
-                "arn:aws:s3:::{}/*".format(s3_bucket_name)
+                "arn:aws:iam::{}:role/{}".format(AWS_ACCOUNT, ec2_role_name),
+                "arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT),
+                "arn:aws:ec2:{}::image/*".format(AWS_REGION),
+                "arn:aws:ec2:{}:{}:network-interface/*".format(AWS_REGION, AWS_ACCOUNT),
+                "arn:aws:ec2:{}:{}:volume/*".format(AWS_REGION, AWS_ACCOUNT),
+                "arn:aws:ec2:{}:{}:security-group/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_security_group_id),
+                "arn:aws:ec2:{}:{}:subnet/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_subnet_id),
+                "arn:aws:ec2:{}:{}:vpc/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_vpc_id),
             ]
         }]
     }
@@ -62,15 +47,16 @@ def ssm_policies_in_json():
         {
             "Effect": "Allow",
             "Action": [
-                "iam:PassRole",
+                "ssm:SendCommand",
                 "ssm:CreateDocument",
                 "ssm:DeleteDocument",
-                "ssm:SendCommand",
                 "ssm:ListCommands",
                 "ssm:DescribeInstanceInformation"
             ],
             "Resource": [
-                "*"
+                "arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT),  # Needed for ssm:SendCommand
+                "arn:aws:ssm:{}:{}:*".format(AWS_REGION, AWS_ACCOUNT),
+                "arn:aws:ssm:{}:{}:document/*".format(AWS_REGION, AWS_ACCOUNT),
             ]
         }]
     }
@@ -161,8 +147,8 @@ def s3_read_write_policy_in_json(s3_bucket_name):
         {
             "Effect": "Allow",
             "Action": [
-                "s3:Put*",
-                "s3:Get*"
+                "s3:PutObject",
+                "s3:GetObject"
             ],
             "Resource": [
                 "arn:aws:s3:::{}/*".format(s3_bucket_name)
diff --git a/tests/ci/run_ec2_test_framework.sh b/tests/ci/run_ec2_test_framework.sh
index 2d3e7153f0..d37d3312a9 100755
--- a/tests/ci/run_ec2_test_framework.sh
+++ b/tests/ci/run_ec2_test_framework.sh
@@ -31,7 +31,7 @@ generate_ssm_document_file() {
 create_ec2_instances() {
   local instance_id
   instance_id="$(aws ec2 run-instances --image-id "$1" --count 1 \
-    --instance-type "$2" --security-group-ids "${sg_id}" --subnet-id "${subnet_id}" \
+    --instance-type "$2" --security-group-ids "${EC2_SECURITY_GROUP_ID}" --subnet-id "${EC2_SUBNET_ID}" \
     --block-device-mappings 'DeviceName="/dev/sda1",Ebs={DeleteOnTermination=True,VolumeSize=200}' \
     --tag-specifications 'ResourceType="instance",Tags=[{Key="Name",Value="ec2-test-'"$CODEBUILD_WEBHOOK_TRIGGER"'"}]' \
     --iam-instance-profile Name=aws-lc-ci-ec2-test-framework-ec2-profile \
@@ -53,11 +53,6 @@
 export ec2_instance_type="$2"
 export ecr_docker_tag="$3"
 export s3_bucket_name="aws-lc-codebuild"
 
-# Get resources for ec2 instances. These were created with the cdk script.
-vpc_id="$(aws ec2 describe-vpcs --filter Name=tag:Name,Values=aws-lc-ci-ec2-test-framework/aws-lc-ci-ec2-test-framework-ec2-vpc --query Vpcs[*].VpcId --output text)"
-sg_id="$(aws ec2 describe-security-groups --filter Name=vpc-id,Values="${vpc_id}" --filter Name=group-name,Values=codebuild_ec2_sg --query SecurityGroups[*].GroupId --output text)"
-subnet_id="$(aws ec2 describe-subnets --filter Name=vpc-id,Values="${vpc_id}" --filter Name=state,Values=available --filter Name=tag:Name,Values=aws-lc-ci-ec2-test-framework/aws-lc-ci-ec2-test-framework-ec2-vpc/PrivateSubnet1 --query Subnets[*].SubnetId --output text)"
-
 # create the ssm documents that will be used for the various ssm commands
 generate_ssm_document_file
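
Note (not part of the patch): a minimal sketch of how the re-scoped ec2_policies_in_json helper above can be exercised in isolation. The sample role name and resource IDs are placeholders inferred from the stack id used elsewhere in this change; util.metadata must resolve AWS_REGION and AWS_ACCOUNT for the ARNs to render.

    # Illustrative only: sample IDs below are made up; the helper and its new
    # signature come from tests/ci/cdk/util/iam_policies.py as changed above.
    from util.iam_policies import ec2_policies_in_json

    policy = ec2_policies_in_json(
        ec2_role_name="aws-lc-ci-ec2-test-framework-ec2-role",
        ec2_security_group_id="sg-0123456789abcdef0",
        ec2_subnet_id="subnet-0123456789abcdef0",
        ec2_vpc_id="vpc-0123456789abcdef0",
    )

    # After this change the statement is scoped to concrete ARNs instead of "*".
    assert all(resource != "*" for resource in policy["Statement"][0]["Resource"])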