Skip to content

Commit

Permalink
Fix overscoped json policies in CI (#1494)
Browse files Browse the repository at this point in the history
There were a few overscoped IAM policies in our CI. Best practice is to
minimize permissions, so we're enforcing that here.
* I've moved some of the EC2 resource creation to CDK instead of the
original CLI methods. The test framework can access these resources via
CodeBuild environment variables.
* While checking our S3 bucket policies, I discovered that the automated
Windows Docker image build no longer works. This is because Docker is no
longer installed by default on the Windows AMI. The new minimal S3
permissions work, but I've left a TODO to fix the Docker issue later on.
  • Loading branch information
samuel40791765 authored Mar 22, 2024
1 parent 1af7018 commit 5ede432
Show file tree
Hide file tree
Showing 6 changed files with 71 additions and 105 deletions.
68 changes: 40 additions & 28 deletions tests/ci/cdk/cdk/aws_lc_ec2_test_framework_ci_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,36 @@ def __init__(self,
],
webhook_triggers_batch_build=True)

# S3 bucket for testing internal fixes.
s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json("aws-lc-codebuild"))
ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO]))
ec2_inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy}
ec2_role = iam.Role(scope=self, id="{}-ec2-role".format(id),
role_name="{}-ec2-role".format(id),
assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
inline_policies=ec2_inline_policies,
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"),
iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy")
])
iam.CfnInstanceProfile(scope=self, id="{}-ec2-profile".format(id),
roles=[ec2_role.role_name],
instance_profile_name="{}-ec2-profile".format(id))

# create vpc for ec2s
vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id))
selected_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)

# create security group with default rules
security_group = ec2.SecurityGroup(self, id="{}-ec2-sg".format(id),
allow_all_outbound=True,
vpc=vpc,
security_group_name='codebuild_ec2_sg')


# Define a IAM role for this stack.
code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id]))
ec2_policy = iam.PolicyDocument.from_json(ec2_policies_in_json())
ec2_policy = iam.PolicyDocument.from_json(ec2_policies_in_json(ec2_role.role_name, security_group.security_group_id, selected_subnets.subnets[0].subnet_id, vpc.vpc_id))
ssm_policy = iam.PolicyDocument.from_json(ssm_policies_in_json())
codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy,
"ec2_policy": ec2_policy,
Expand All @@ -67,37 +94,22 @@ def __init__(self,
environment=codebuild.BuildEnvironment(compute_type=codebuild.ComputeType.SMALL,
privileged=False,
build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
build_spec=BuildSpecLoader.load(spec_file_path))
build_spec=BuildSpecLoader.load(spec_file_path),
environment_variables= {
"EC2_SECURITY_GROUP_ID": codebuild.BuildEnvironmentVariable(
value=security_group.security_group_id
),
"EC2_SUBNET_ID": codebuild.BuildEnvironmentVariable(
value=selected_subnets.subnets[0].subnet_id
),
"EC2_VPC_ID": codebuild.BuildEnvironmentVariable(
value=vpc.vpc_id
),
})
project.enable_batch_builds()

PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project)

# S3 bucket for testing internal fixes.
s3_read_write_policy = iam.PolicyDocument.from_json(s3_read_write_policy_in_json("aws-lc-codebuild"))
ecr_power_user_policy = iam.PolicyDocument.from_json(ecr_power_user_policy_in_json([LINUX_X86_ECR_REPO, LINUX_AARCH_ECR_REPO]))
ec2_inline_policies = {"s3_read_write_policy": s3_read_write_policy, "ecr_power_user_policy": ecr_power_user_policy}
ec2_role = iam.Role(scope=self, id="{}-ec2-role".format(id),
role_name="{}-ec2-role".format(id),
assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
inline_policies=ec2_inline_policies,
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"),
iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchAgentServerPolicy")
])
iam.CfnInstanceProfile(scope=self, id="{}-ec2-profile".format(id),
roles=[ec2_role.role_name],
instance_profile_name="{}-ec2-profile".format(id))

# create vpc for ec2s
vpc = ec2.Vpc(self, id="{}-ec2-vpc".format(id))
selection = vpc.select_subnets()

# create security group with default rules
security_group = ec2.SecurityGroup(self, id="{}-ec2-sg".format(id),
allow_all_outbound=True,
vpc=vpc,
security_group_name='codebuild_ec2_sg')

# Define logs for SSM.
log_group_name = "{}-cw-logs".format(id)
log_group = logs.CfnLogGroup(self, log_group_name,
Expand Down
41 changes: 4 additions & 37 deletions tests/ci/cdk/cdk/bm_framework_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,13 @@
import boto3

from botocore.exceptions import ClientError
from aws_cdk import Duration, Stack, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_s3 as s3, aws_logs as logs
from aws_cdk import Duration, Stack, aws_ec2 as ec2, aws_codebuild as codebuild, aws_iam as iam, aws_logs as logs
from constructs import Construct

from cdk.components import PruneStaleGitHubBuilds
from util.metadata import AWS_ACCOUNT, AWS_REGION, GITHUB_REPO_OWNER, GITHUB_REPO_NAME
from util.iam_policies import code_build_batch_policy_in_json, s3_read_write_policy_in_json, \
ec2_bm_framework_policies_in_json, ssm_bm_framework_policies_in_json, s3_bm_framework_policies_in_json, \
ecr_power_user_policy_in_json
from util.iam_policies import code_build_batch_policy_in_json, ec2_bm_framework_policies_in_json, \
ssm_bm_framework_policies_in_json, ecr_power_user_policy_in_json
from util.build_spec_loader import BuildSpecLoader

# detailed documentation can be found here: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html
Expand All @@ -28,8 +27,6 @@ def __init__(self,
super().__init__(scope, id, **kwargs)

# Define some variables that will be commonly used
S3_PROD_BUCKET = "{}-{}-prod-bucket".format(AWS_ACCOUNT, id)
S3_PR_BUCKET = "{}-{}-pr-bucket".format(AWS_ACCOUNT, id)
CLOUDWATCH_LOGS = "{}-{}-cw-logs".format(AWS_ACCOUNT, id)

# Define CodeBuild resource.
Expand All @@ -49,17 +46,9 @@ def __init__(self,
code_build_batch_policy = iam.PolicyDocument.from_json(code_build_batch_policy_in_json([id]))
ec2_bm_framework_policy = iam.PolicyDocument.from_json(ec2_bm_framework_policies_in_json())
ssm_bm_framework_policy = iam.PolicyDocument.from_json(ssm_bm_framework_policies_in_json())
s3_read_write_policy_prod_bucket = iam.PolicyDocument.from_json(s3_read_write_policy_in_json(S3_PROD_BUCKET))
s3_read_write_policy_pr_bucket = iam.PolicyDocument.from_json(s3_read_write_policy_in_json(S3_PR_BUCKET))
s3_bm_framework_policy_prod_bucket = iam.PolicyDocument.from_json(s3_bm_framework_policies_in_json(S3_PROD_BUCKET))
s3_bm_framework_policy_pr_bucket = iam.PolicyDocument.from_json(s3_bm_framework_policies_in_json(S3_PR_BUCKET))
codebuild_inline_policies = {"code_build_batch_policy": code_build_batch_policy,
"ec2_bm_framework_policy": ec2_bm_framework_policy,
"ssm_bm_framework_policy": ssm_bm_framework_policy,
"s3_read_write_policy_prod_bucket": s3_read_write_policy_prod_bucket,
"s3_read_write_policy_pr_bucket": s3_read_write_policy_pr_bucket,
"s3_bm_framework_policy_prod_bucket": s3_bm_framework_policy_prod_bucket,
"s3_bm_framework_policy_pr_bucket": s3_bm_framework_policy_pr_bucket}
"ssm_bm_framework_policy": ssm_bm_framework_policy}
codebuild_role = iam.Role(scope=self,
id="{}-codebuild-role".format(id),
assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
Expand All @@ -84,28 +73,6 @@ def __init__(self,

PruneStaleGitHubBuilds(scope=self, id="PruneStaleGitHubBuilds", project=project)

# use boto3 to determine if a bucket with the name that we want exists, and if it doesn't, create it
s3_res = boto3.resource('s3')
prod_bucket = s3_res.Bucket(S3_PROD_BUCKET)
pr_bucket = s3_res.Bucket(S3_PR_BUCKET)
try:
s3_res.meta.client.head_bucket(Bucket=prod_bucket.name)
except ClientError:
production_results_s3 = s3.Bucket(self, "{}-prod-bucket".format(id),
bucket_name=S3_PROD_BUCKET,
enforce_ssl=True)

production_results_s3.grant_put(codebuild_role)

try:
s3_res.meta.client.head_bucket(Bucket=pr_bucket.name)
except ClientError:
pr_results_s3 = s3.Bucket(self, "{}-pr-bucket".format(id),
bucket_name=S3_PR_BUCKET,
enforce_ssl=True)

pr_results_s3.grant_put(codebuild_role)

# use boto3 to determine if a cloudwatch logs group with the name we want exists, and if it doesn't, create it
logs_client = boto3.client('logs', region_name=AWS_REGION)
try:
Expand Down
2 changes: 2 additions & 0 deletions tests/ci/cdk/cdk/windows_docker_image_build_stack.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ def __init__(self,
])

# Define Windows EC2 instance, where the SSM document will be executed.
# TODO: This AMI does not have docker installed by default anymore. Find another Windows machine
# that has docker by default or update the ssm document to properly install docker.
machine_image = ec2.MachineImage.latest_windows(
ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE)
vpc = ec2.Vpc(scope=self, id="{}-vpc".format(id))
Expand Down
12 changes: 8 additions & 4 deletions tests/ci/cdk/run-cdk.sh
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,10 @@ function create_linux_docker_img_build_stack() {
destroy_docker_img_build_stack
# Deploy aws-lc ci stacks.
# When repeatedly deploy, error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
# https://forums.aws.amazon.com/thread.jspa?messageID=952368
# Workaround: go to AWS EIP console, release unused IP.
#
# Workaround: The default quota amount is 5 EIP addresses. Go to
# https://us-west-2.console.aws.amazon.com/servicequotas/home/services/ec2/quotas and request a quota
# increase for "EC2-VPC Elastic IPs".
cdk deploy aws-lc-docker-image-build-linux --require-approval never
}

Expand All @@ -77,8 +79,10 @@ function create_win_docker_img_build_stack() {
destroy_docker_img_build_stack
# Deploy aws-lc ci stacks.
# When repeatedly deploy, error 'EIP failed Reason: Maximum number of addresses has been reached' can happen.
# https://forums.aws.amazon.com/thread.jspa?messageID=952368
# Workaround: go to AWS EIP console, release unused IP.
#
# Workaround: The default quota amount is 5 EIP addresses. Go to
# https://us-west-2.console.aws.amazon.com/servicequotas/home/services/ec2/quotas and request a quota
# increase for "EC2-VPC Elastic IPs".
cdk deploy aws-lc-docker-image-build-windows --require-approval never
}

Expand Down
46 changes: 16 additions & 30 deletions tests/ci/cdk/util/iam_policies.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

from util.metadata import AWS_REGION, AWS_ACCOUNT

def ec2_policies_in_json():
def ec2_policies_in_json(ec2_role_name, ec2_security_group_id, ec2_subnet_id, ec2_vpc_id):
"""
Define an IAM policy that gives permissions for starting, stopping, and getting details of EC2 instances and their Vpcs
:return: an IAM policy statement in json.
Expand All @@ -16,36 +16,21 @@ def ec2_policies_in_json():
{
"Effect": "Allow",
"Action": [
"iam:PassRole",
"ec2:RunInstances",
"ec2:TerminateInstances",
"ec2:CreateTags",
"ec2:DescribeInstances",
"ec2:DescribeVpcs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets"
],
"Resource": [
"*"
]
}]
}

def s3_bm_framework_policies_in_json(s3_bucket_name):
"""
Define an IAM policy that gives some s3 permissions needed by the EC2 instances of the benchmarking framework
"""
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:DeleteObject"
],
"Resource": [
"arn:aws:s3:::{}".format(s3_bucket_name),
"arn:aws:s3:::{}/*".format(s3_bucket_name)
"arn:aws:iam::{}:role/{}".format(AWS_ACCOUNT, ec2_role_name),
"arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT),
"arn:aws:ec2:{}::image/*".format(AWS_REGION),
"arn:aws:ec2:{}:{}:network-interface/*".format(AWS_REGION, AWS_ACCOUNT),
"arn:aws:ec2:{}:{}:volume/*".format(AWS_REGION, AWS_ACCOUNT),
"arn:aws:ec2:{}:{}:security-group/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_security_group_id),
"arn:aws:ec2:{}:{}:subnet/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_subnet_id),
"arn:aws:ec2:{}:{}:vpc/{}".format(AWS_REGION, AWS_ACCOUNT, ec2_vpc_id),
]
}]
}
Expand All @@ -62,15 +47,16 @@ def ssm_policies_in_json():
{
"Effect": "Allow",
"Action": [
"iam:PassRole",
"ssm:SendCommand",
"ssm:CreateDocument",
"ssm:DeleteDocument",
"ssm:SendCommand",
"ssm:ListCommands",
"ssm:DescribeInstanceInformation"
],
"Resource": [
"*"
"arn:aws:ec2:{}:{}:instance/*".format(AWS_REGION, AWS_ACCOUNT), # Needed for ssm:SendCommand
"arn:aws:ssm:{}:{}:*".format(AWS_REGION, AWS_ACCOUNT),
"arn:aws:ssm:{}:{}:document/*".format(AWS_REGION, AWS_ACCOUNT),
]
}]
}
Expand Down Expand Up @@ -161,8 +147,8 @@ def s3_read_write_policy_in_json(s3_bucket_name):
{
"Effect": "Allow",
"Action": [
"s3:Put*",
"s3:Get*"
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::{}/*".format(s3_bucket_name)
Expand Down
7 changes: 1 addition & 6 deletions tests/ci/run_ec2_test_framework.sh
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ generate_ssm_document_file() {
create_ec2_instances() {
local instance_id
instance_id="$(aws ec2 run-instances --image-id "$1" --count 1 \
--instance-type "$2" --security-group-ids "${sg_id}" --subnet-id "${subnet_id}" \
--instance-type "$2" --security-group-ids "${EC2_SECURITY_GROUP_ID}" --subnet-id "${EC2_SUBNET_ID}" \
--block-device-mappings 'DeviceName="/dev/sda1",Ebs={DeleteOnTermination=True,VolumeSize=200}' \
--tag-specifications 'ResourceType="instance",Tags=[{Key="Name",Value="ec2-test-'"$CODEBUILD_WEBHOOK_TRIGGER"'"}]' \
--iam-instance-profile Name=aws-lc-ci-ec2-test-framework-ec2-profile \
Expand All @@ -53,11 +53,6 @@ export ec2_instance_type="$2"
export ecr_docker_tag="$3"
export s3_bucket_name="aws-lc-codebuild"

# Get resources for ec2 instances. These were created with the cdk script.
vpc_id="$(aws ec2 describe-vpcs --filter Name=tag:Name,Values=aws-lc-ci-ec2-test-framework/aws-lc-ci-ec2-test-framework-ec2-vpc --query Vpcs[*].VpcId --output text)"
sg_id="$(aws ec2 describe-security-groups --filter Name=vpc-id,Values="${vpc_id}" --filter Name=group-name,Values=codebuild_ec2_sg --query SecurityGroups[*].GroupId --output text)"
subnet_id="$(aws ec2 describe-subnets --filter Name=vpc-id,Values="${vpc_id}" --filter Name=state,Values=available --filter Name=tag:Name,Values=aws-lc-ci-ec2-test-framework/aws-lc-ci-ec2-test-framework-ec2-vpc/PrivateSubnet1 --query Subnets[*].SubnetId --output text)"

# create the ssm documents that will be used for the various ssm commands
generate_ssm_document_file

Expand Down

0 comments on commit 5ede432

Please sign in to comment.