diff --git a/docs/cluster-providers/generic-cluster-provider.md b/docs/cluster-providers/generic-cluster-provider.md index ad44785a5..bc6c77479 100644 --- a/docs/cluster-providers/generic-cluster-provider.md +++ b/docs/cluster-providers/generic-cluster-provider.md @@ -104,7 +104,7 @@ const clusterProvider = new blueprints.GenericClusterProvider({ mastersRole: blueprints.getResource(context => { return new iam.Role(context.scope, 'AdminRole', { assumedBy: new AccountRootPrincipal() }); }), - securityGroup: blueprints.getNamedResource("my-cluster-security-group"), // assumed to be register as a resource provider under name my-cluster-security-group + securityGroup: blueprints.getNamedResource("my-cluster-security-group") as ec2.ISecurityGroup, // assumed to be register as a resource provider under name my-cluster-security-group managedNodeGroups: [ { id: "mng1", @@ -119,7 +119,7 @@ const clusterProvider = new blueprints.GenericClusterProvider({ EksBlueprint.builder() .resourceProvider("my-cluster-security-group", { provide(context: blueprints.ResourceContext) : ec2.ISecurityGroup { - return ec2.SecurityGroup.fromSecurityGroupId(this, 'SG', 'sg-12345', { mutable: false }); // example for look up + return ec2.SecurityGroup.fromSecurityGroupId(context.scope, 'SG', 'sg-12345', { mutable: false }); // example for look up } }) .clusterProvider(clusterProvider) diff --git a/docs/cluster-providers/import-cluster-provider.md b/docs/cluster-providers/import-cluster-provider.md new file mode 100644 index 000000000..c2d161fee --- /dev/null +++ b/docs/cluster-providers/import-cluster-provider.md @@ -0,0 +1,108 @@ +# Import Cluster Provider + +The `ImportClusterProvider` allows you to import an existing EKS cluster into your blueprint. Importing an existing cluster at present will allow adding certain add-ons and limited team capabilities. 
+
+## Usage
+
+The framework provides a couple of convenience methods to instantiate the `ImportClusterProvider` by leveraging the SDK API call to describe the cluster.
+
+### Option 1
+
+Recommended option is to get the cluster information through the `DescribeCluster` API (requires `eks:DescribeCluster` permission at build-time) and then use it to instantiate the `ImportClusterProvider` and **(very important)** to set up the blueprint VPC.
+
+Make sure VPC is set to the VPC of the imported cluster, otherwise the blueprint by default will create a new VPC, which will be redundant and cause problems with some of the add-ons.
+
+**Note:** `blueprints.describeCluster()` is an asynchronous function, you should either use `await` or handle promise resolution chain.
+
+```typescript
+const clusterName = "quickstart-cluster";
+const region = "us-east-2";
+
+const sdkCluster = await blueprints.describeCluster(clusterName, region); // get cluster information using EKS APIs
+
+/**
+ * Assumes the supplied role is registered in the target cluster for kubectl access.
+ */
+const importClusterProvider = blueprints.ImportClusterProvider.fromClusterAttributes(
+    sdkCluster,
+    blueprints.getResource(context => new blueprints.LookupRoleProvider(kubectlRoleName).provide(context))
+);
+
+const vpcId = sdkCluster.resourcesVpcConfig?.vpcId;
+
+blueprints.EksBlueprint.builder()
+    .clusterProvider(importClusterProvider)
+    .resourceProvider(blueprints.GlobalResources.Vpc, new blueprints.VpcProvider(vpcId)) // this is required with import cluster provider
+
+```
+
+### Option 2
+
+This option is convenient if you already know the VPC Id of the target cluster. It also requires `eks:DescribeCluster` permission at build-time:
+
+```typescript
+const clusterName = "quickstart-cluster";
+const region = "us-east-2";
+
+const kubectlRole: iam.IRole = blueprints.getNamedResource('my-role');
+
+const importClusterProvider2 = await blueprints.ImportClusterProvider.fromClusterLookup(clusterName, region, kubectlRole); // note await here
+
+const vpcId = ...; // you can always get it with blueprints.describeCluster(clusterName, region);
+
+blueprints.EksBlueprint.builder()
+    .clusterProvider(importClusterProvider2)
+    .resourceProvider('my-role', new blueprints.LookupRoleProvider('my-role'))
+    .resourceProvider(blueprints.GlobalResources.Vpc, new blueprints.VpcProvider(vpcId))
+```
+
+### Option 3
+
+Unlike the other options, this one does not require any special permissions at build time, however it requires passing all the required information to the import cluster provider.
+OIDC provider is expected to be passed in as well if you are planning to leverage IRSA with your blueprint. The OIDC provider is expected to be registered in the imported cluster already, otherwise IRSA won't work.
+ + +```typescript + +const importClusterProvider3 = new ImportClusterProvider({ + clusterName: 'my-existing-cluster', + version: KubernetesVersion.V1_26, + clusterEndpoint: 'https://B792B88BC60999B1A37D.gr7.us-east-2.eks.amazonaws.com', + openIdConnectProvider: getResource(context => + new LookupOpenIdConnectProvider('https://oidc.eks.us-east-2.amazonaws.com/id/B792B88BC60999B1A37D').provide(context)), + clusterCertificateAuthorityData: 'S0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCasdd234................', + kubectlRoleArn: 'arn:...', +}); + +const vpcId = ...; + +blueprints.EksBlueprint.builder() + .clusterProvider(importClusterProvider3) + .resourceProvider(blueprints.GlobalResources.Vpc, new blueprints.VpcProvider(vpcId)) +``` + +## Configuration + +The `ImportClusterProvider` supports the following configuration options: + +| Prop | Description | +|-----------------------|-------------| +| clusterName | Cluster name +| version | EKS version of the target cluster +| clusterEndpoint | The API Server endpoint URL +| openIdConnectProvider | An Open ID Connect provider for this cluster that can be used to configure service accounts. You can either import an existing provider using `LookupOpenIdConnectProvider`, or create a new provider using new custom resource provider to call `new eks.OpenIdConnectProvider` +| clusterCertificateAuthorityData | The certificate-authority-data for your cluster. +| kubectlRoleArn | An IAM role with cluster administrator and "system:masters" permissions. + + +## Known Limitations + +The following add-ons will not work with the `ImportClusterProvider` due to the inability (at present) of the imported clusters to modify `aws-auth` ConfigMap and mutate cluster authentication: +* `ClusterAutoScalerAddOn` +* `AwsBatchAddOn` +* `EmrEksAddOn` +* `KarpenterAddOn` + +Teams can be added to the cluster and will perform all of the team functionality except cluster access due to the same inability to mutate cluster access. 
+ +At the moment, there are no examples to add extra capacity to the imported clusters like node groups. \ No newline at end of file diff --git a/docs/cluster-providers/index.md b/docs/cluster-providers/index.md index 8daa258d4..a3bd7b302 100644 --- a/docs/cluster-providers/index.md +++ b/docs/cluster-providers/index.md @@ -9,7 +9,8 @@ The framework currently provides support for the following Cluster Providers: | [`GenericClusterProvider`](./generic-cluster-provider) | Provisions an EKS cluster with one or more managed or Auto Scaling groups as well as Fargate Profiles. | [`AsgClusterProvider`](./asg-cluster-provider) | Provisions an EKS cluster with an Auto Scaling group used for compute capacity. | [`MngClusterProvider`](./mng-cluster-provider) | Provisions an EKS cluster with a Managed Node group for compute capacity. -| [`FargateClusterProviders`](./fargate-cluster-provider) | Provisions an EKS cluster which leverages AWS Fargate to run Kubernetes pods. +| [`FargateClusterProvider`](./fargate-cluster-provider) | Provisions an EKS cluster which leverages AWS Fargate to run Kubernetes pods. +| [`ImportClusterProvider`](./import-cluster-provider) | Imports an existing EKS cluster into the blueprint allowing capabilities to add (certain) add-ons and teams. By default, the framework will leverage the `MngClusterProvider` which creates a single managed node group. 
diff --git a/examples/teams/team-troi/index.ts b/examples/teams/team-troi/index.ts index 519877fbd..6852c865b 100644 --- a/examples/teams/team-troi/index.ts +++ b/examples/teams/team-troi/index.ts @@ -30,7 +30,7 @@ export class TeamTroi implements Team { new cdk.CfnOutput(stack, this.name + '-sa-iam-role', { value: sa.role.roleArn }); } - setupNamespacePolicies(cluster: eks.Cluster) : eks.KubernetesManifest { + setupNamespacePolicies(cluster: eks.ICluster) : eks.KubernetesManifest { const quotaName = this.name + "-quota"; return cluster.addManifest(quotaName, { apiVersion: 'v1', diff --git a/lib/addons/aws-batch-on-eks/index.ts b/lib/addons/aws-batch-on-eks/index.ts index 24af5bf22..442b6bbcf 100644 --- a/lib/addons/aws-batch-on-eks/index.ts +++ b/lib/addons/aws-batch-on-eks/index.ts @@ -1,5 +1,7 @@ +import assert = require("assert"); import { ClusterAddOn, ClusterInfo } from "../../spi"; import { Stack } from "aws-cdk-lib"; +import { Cluster } from "aws-cdk-lib/aws-eks"; import { CfnServiceLinkedRole, IRole, Role } from "aws-cdk-lib/aws-iam"; import { Construct } from "constructs"; @@ -7,7 +9,8 @@ const BATCH = 'aws-batch'; export class AwsBatchAddOn implements ClusterAddOn { deploy(clusterInfo: ClusterInfo): Promise { - const cluster = clusterInfo.cluster; + assert(clusterInfo.cluster instanceof Cluster, "AwsBatchAddOn cannot be used with imported clusters"); + const cluster: Cluster = clusterInfo.cluster; const roleNameforBatch = 'AWSServiceRoleForBatch'; const slrCheck = Role.fromRoleName(cluster.stack, 'BatchServiceLinkedRole', roleNameforBatch); diff --git a/lib/addons/aws-node-termination-handler/index.ts b/lib/addons/aws-node-termination-handler/index.ts index 7b161ecac..dea200293 100644 --- a/lib/addons/aws-node-termination-handler/index.ts +++ b/lib/addons/aws-node-termination-handler/index.ts @@ -1,6 +1,6 @@ import { AutoScalingGroup, LifecycleHook, LifecycleTransition } from 'aws-cdk-lib/aws-autoscaling'; import { QueueHook } from 
'aws-cdk-lib/aws-autoscaling-hooktargets'; -import { Cluster, ServiceAccount } from 'aws-cdk-lib/aws-eks'; +import { ICluster, ServiceAccount } from 'aws-cdk-lib/aws-eks'; import { EventPattern, Rule } from 'aws-cdk-lib/aws-events'; import { SqsQueue } from 'aws-cdk-lib/aws-events-targets'; import * as iam from 'aws-cdk-lib/aws-iam'; @@ -122,7 +122,7 @@ export class AwsNodeTerminationHandlerAddOn extends HelmAddOn { * @param asgCapacity * @returns Helm values */ - private configureQueueMode(cluster: Cluster, serviceAccount: ServiceAccount, asgCapacity: AutoScalingGroup[], karpenter: Promise | undefined): any { + private configureQueueMode(cluster: ICluster, serviceAccount: ServiceAccount, asgCapacity: AutoScalingGroup[], karpenter: Promise | undefined): any { const queue = new Queue(cluster.stack, "aws-nth-queue", { retentionPeriod: Duration.minutes(5) }); diff --git a/lib/addons/ebs-csi-driver/index.ts b/lib/addons/ebs-csi-driver/index.ts index c314d9dff..62cbe0d0e 100644 --- a/lib/addons/ebs-csi-driver/index.ts +++ b/lib/addons/ebs-csi-driver/index.ts @@ -7,7 +7,7 @@ import { getEbsDriverPolicyDocument } from "./iam-policy"; /** * Interface for EBS CSI Driver EKS add-on options */ -interface EbsCsiDriverAddOnProps { +export interface EbsCsiDriverAddOnProps { /** * Version of the driver to deploy */ diff --git a/lib/addons/emr-on-eks/index.ts b/lib/addons/emr-on-eks/index.ts index 114cd3c87..a62f4c3df 100644 --- a/lib/addons/emr-on-eks/index.ts +++ b/lib/addons/emr-on-eks/index.ts @@ -1,12 +1,14 @@ +import assert = require("assert"); import { ClusterAddOn, ClusterInfo } from "../../spi"; import { Stack } from "aws-cdk-lib"; +import { Cluster } from "aws-cdk-lib/aws-eks"; import { CfnServiceLinkedRole, IRole, Role } from "aws-cdk-lib/aws-iam"; import { Construct } from "constructs"; export class EmrEksAddOn implements ClusterAddOn { deploy(clusterInfo: ClusterInfo): Promise { - const cluster = clusterInfo.cluster; - + assert(clusterInfo.cluster instanceof Cluster, 
"EmrEksAddOn cannot be used with imported clusters as it requires changes to the cluster authentication."); + const cluster: Cluster = clusterInfo.cluster; /* * Create the service role used by EMR on EKS @@ -35,6 +37,5 @@ export class EmrEksAddOn implements ClusterAddOn { ); return Promise.resolve(emrOnEksSlr); - } } \ No newline at end of file diff --git a/lib/addons/karpenter/index.ts b/lib/addons/karpenter/index.ts index 39c66a9e2..84699aab7 100644 --- a/lib/addons/karpenter/index.ts +++ b/lib/addons/karpenter/index.ts @@ -137,7 +137,8 @@ export class KarpenterAddOn extends HelmAddOn { @conflictsWith('ClusterAutoScalerAddOn') deploy(clusterInfo: ClusterInfo): Promise { - const cluster = clusterInfo.cluster; + assert(clusterInfo.cluster instanceof Cluster, "KarpenterAddOn cannot be used with imported clusters as it requires changes to the cluster authentication."); + const cluster : Cluster = clusterInfo.cluster; const endpoint = cluster.clusterEndpoint; const name = cluster.clusterName; diff --git a/lib/addons/vpc-cni/index.ts b/lib/addons/vpc-cni/index.ts index 1114e5daf..20f080835 100644 --- a/lib/addons/vpc-cni/index.ts +++ b/lib/addons/vpc-cni/index.ts @@ -180,6 +180,12 @@ export interface VpcCniAddOnProps { * */ serviceAccountPolicies?: iam.IManagedPolicy[]; + + /** + * Version of the add-on to use. Must match the version of the cluster where it + * will be deployed. + */ + version?: string; } diff --git a/lib/cluster-providers/generic-cluster-provider.ts b/lib/cluster-providers/generic-cluster-provider.ts index 8f5b76b6f..d5000ff31 100644 --- a/lib/cluster-providers/generic-cluster-provider.ts +++ b/lib/cluster-providers/generic-cluster-provider.ts @@ -21,6 +21,31 @@ export function clusterBuilder() { return new ClusterBuilder(); } +/** + * Function that contains logic to map the correct kunbectl layer based on the passed in version. 
+ * @param scope in which the kubectl layer must be created
+ * @param version EKS version
+ * @returns ILayerVersion or undefined
+ */
+export function selectKubectlLayer(scope: Construct, version: eks.KubernetesVersion): ILayerVersion | undefined {
+    switch(version) {
+        case eks.KubernetesVersion.V1_23:
+            return new KubectlV23Layer(scope, "kubectllayer23");
+        case eks.KubernetesVersion.V1_24:
+            return new KubectlV24Layer(scope, "kubectllayer24");
+        case eks.KubernetesVersion.V1_25:
+            return new KubectlV25Layer(scope, "kubectllayer25");
+        case eks.KubernetesVersion.V1_26:
+            return new KubectlV26Layer(scope, "kubectllayer26");
+    }
+
+    const minor = version.version.split('.')[1];
+
+    if(minor && parseInt(minor, 10) > 26) {
+        return new KubectlV26Layer(scope, "kubectllayer26"); // for all versions above 1.26 use 1.26 kubectl (unless explicitly supported in CDK)
+    }
+    return undefined;
+}
 /**
  * Properties for the generic cluster provider, containing definitions of managed node groups,
  * auto-scaling groups, fargate profiles.
@@ -282,23 +307,7 @@ export class GenericClusterProvider implements ClusterProvider { * @returns */ protected getKubectlLayer(scope: Construct, version: eks.KubernetesVersion) : ILayerVersion | undefined { - switch(version) { - case eks.KubernetesVersion.V1_23: - return new KubectlV23Layer(scope, "kubectllayer23"); - case eks.KubernetesVersion.V1_24: - return new KubectlV24Layer(scope, "kubectllayer24"); - case eks.KubernetesVersion.V1_25: - return new KubectlV25Layer(scope, "kubectllayer25"); - case eks.KubernetesVersion.V1_26: - return new KubectlV26Layer(scope, "kubectllayer26"); - } - - const minor = version.version.split('.')[1]; - - if(minor && parseInt(minor, 10) > 26) { - return new KubectlV26Layer(scope, "kubectllayer26"); // for all versions above 1.25 use 1.25 kubectl (unless explicitly supported in CDK) - } - return undefined; + return selectKubectlLayer(scope, version); } /** diff --git a/lib/cluster-providers/import-cluster-provider.ts b/lib/cluster-providers/import-cluster-provider.ts new file mode 100644 index 000000000..af2601fff --- /dev/null +++ b/lib/cluster-providers/import-cluster-provider.ts @@ -0,0 +1,107 @@ +import { ClusterInfo, ClusterProvider } from "../spi"; +import { selectKubectlLayer } from "./generic-cluster-provider"; +import { IVpc } from "aws-cdk-lib/aws-ec2"; +import * as eks from "aws-cdk-lib/aws-eks"; +import { IRole } from "aws-cdk-lib/aws-iam"; +import { IKey } from "aws-cdk-lib/aws-kms"; +import * as sdk from "@aws-sdk/client-eks"; +import { Construct } from "constructs"; +import { getResource } from "../resource-providers/utils"; +import { LookupOpenIdConnectProvider } from "../resource-providers"; +import { logger } from "../utils"; + + +/** + * Properties object for the ImportClusterProvider. + */ +export interface ImportClusterProviderProps extends Omit { + /** + * This property is needed as it drives selection of certain add-on versions as well as kubectl layer. 
+     */
+    version: eks.KubernetesVersion;
+}
+
+/**
+ * Importing cluster into the blueprint enabling limited blueprinting capabilities such as adding certain addons,
+ * teams.
+ */
+export class ImportClusterProvider implements ClusterProvider {
+
+    constructor(private readonly props: ImportClusterProviderProps) { }
+
+    /**
+     * Implements contract method to create a cluster, by importing an existing cluster.
+     * @param scope
+     * @param vpc
+     * @param _secretsEncryptionKey
+     * @returns
+     */
+    createCluster(scope: Construct, vpc: IVpc, _secretsEncryptionKey?: IKey | undefined): ClusterInfo {
+        const props = { ...this.props, vpc };
+
+        if(! props.kubectlLayer) {
+            props.kubectlLayer = selectKubectlLayer(scope, props.version);
+        }
+
+        const existingCluster = eks.Cluster.fromClusterAttributes(scope, 'imported-cluster-' + this.props.clusterName, props);
+        return new ClusterInfo(existingCluster, this.props.version);
+    }
+
+
+    /**
+     * Requires iam permission to eks.DescribeCluster at build time. Retrieves the cluster information using DescribeCluster api and
+     * creates an import cluster provider.
+     * @param clusterName name of the cluster
+     * @param region target region
+     * @param kubectlRole iam Role that provides access to the cluster API (kubectl). The CDK custom resource should be able to assume the role
+     * which in some cases may require trust policy for the account root principal.
+     * @returns the cluster provider with the import cluster configuration
+     */
+    public static async fromClusterLookup(clusterName: string, region: string, kubectlRole: IRole):
+        Promise<ClusterProvider> {
+
+        const sdkCluster = await describeCluster(clusterName, process.env.CDK_DEFAULT_REGION!);
+        return this.fromClusterAttributes(sdkCluster, kubectlRole);
+    }
+
+    /**
+     * Creates a cluster provider for an existing cluster based on the passed result of the describe cluster command.
+ * @param sdkCluster + * @param kubectlRole + * @returns + */ + public static fromClusterAttributes(sdkCluster: sdk.Cluster, kubectlRole: IRole): ClusterProvider { + return new ImportClusterProvider({ + clusterName: sdkCluster.name!, + version: eks.KubernetesVersion.of(sdkCluster.version!), + clusterEndpoint: sdkCluster.endpoint, + openIdConnectProvider: getResource(context => + new LookupOpenIdConnectProvider(sdkCluster.identity!.oidc!.issuer!).provide(context)), + clusterCertificateAuthorityData: sdkCluster.certificateAuthority?.data, + kubectlRoleArn: kubectlRole.roleArn, + }); + } +} + +/** + * Wraps API call to get the data on the eks.Cluster. + * @param clusterName + * @param region + * @returns + */ +export async function describeCluster(clusterName: string, region: string): Promise { + const client = new sdk.EKSClient({ region }); + const input: sdk.DescribeClusterRequest = { + name: clusterName + }; + + const command = new sdk.DescribeClusterCommand(input); + try { + const response = await client.send(command); + return response.cluster!; + } + catch (error) { + logger.error(error); + throw error; + } +} \ No newline at end of file diff --git a/lib/cluster-providers/index.ts b/lib/cluster-providers/index.ts index 43b1160ee..9270fb17f 100644 --- a/lib/cluster-providers/index.ts +++ b/lib/cluster-providers/index.ts @@ -1,6 +1,7 @@ export * from './asg-cluster-provider'; export * from './fargate-cluster-provider'; export * from "./generic-cluster-provider"; +export * from "./import-cluster-provider"; export * from './mng-cluster-provider'; export * from './types'; diff --git a/lib/cluster-providers/mng-cluster-provider.ts b/lib/cluster-providers/mng-cluster-provider.ts index ac1e9719f..d17f04335 100644 --- a/lib/cluster-providers/mng-cluster-provider.ts +++ b/lib/cluster-providers/mng-cluster-provider.ts @@ -74,5 +74,5 @@ export function assertEC2NodeGroup(clusterInfo: ClusterInfo, source: string): ek if(clusterInfo.autoscalingGroups != undefined && 
clusterInfo.autoscalingGroups.length > 0) { return clusterInfo.autoscalingGroups; } - throw new Error(`${source} is supported with EKS EC2 only`); + throw new Error(`${source} is supported with EKS EC2 only and is not supported for imported clusters`); } \ No newline at end of file diff --git a/lib/resource-providers/iam.ts b/lib/resource-providers/iam.ts index 803b0b1fa..1ca3d2c84 100644 --- a/lib/resource-providers/iam.ts +++ b/lib/resource-providers/iam.ts @@ -1,16 +1,18 @@ import * as iam from 'aws-cdk-lib/aws-iam'; import { IManagedPolicy } from 'aws-cdk-lib/aws-iam'; import * as spi from '../spi'; +import assert = require('assert'); /** * Role provider that imports an existing role, performing its lookup by the provided name. */ export class LookupRoleProvider implements spi.ResourceProvider { - constructor(private readonly roleName: string) { } + constructor(private readonly roleName: string, private readonly mutable?: boolean) { } provide(context: spi.ResourceContext): iam.IRole { - return iam.Role.fromRoleName(context.scope, `${this.roleName}-iam-provider`, this.roleName); + return iam.Role.fromRoleName(context.scope, `${this.roleName}-iam-provider`, this.roleName, + { mutable: this.mutable }); } } @@ -34,4 +36,28 @@ export class CreateRoleProvider implements spi.ResourceProvider { managedPolicies: this.policies }); } +} + +const httpsPrefix = 'https://'; + +/** + * OpenIdConnect provider can lookup an existing OpenIdConnectProvider based on the OIDC provider URL. + */ +export class LookupOpenIdConnectProvider implements spi.ResourceProvider { + + protected readonly id: string; + + constructor(readonly url: string, id?: string) { + const urlParts = url.split('/'); + assert(url.startsWith(httpsPrefix) && urlParts[urlParts.length - 1], "Invalid OIDC provider URL format"); + this.id = id ?? 
urlParts[urlParts.length - 1]; + } + + provide(context: spi.ResourceContext): iam.IOpenIdConnectProvider { + return iam.OpenIdConnectProvider.fromOpenIdConnectProviderArn( + context.scope, + this.id, + `arn:aws:iam::${context.scope.account}:oidc-provider/${this.url.substring(httpsPrefix.length)}` + ); + } } \ No newline at end of file diff --git a/lib/spi/types.ts b/lib/spi/types.ts index 51278004f..328c7f985 100644 --- a/lib/spi/types.ts +++ b/lib/spi/types.ts @@ -1,7 +1,7 @@ import * as assert from "assert"; import * as cdk from 'aws-cdk-lib'; import { AutoScalingGroup } from 'aws-cdk-lib/aws-autoscaling'; -import { Cluster, FargateProfile, KubernetesVersion, Nodegroup } from 'aws-cdk-lib/aws-eks'; +import * as eks from 'aws-cdk-lib/aws-eks'; import { Construct, IConstruct } from 'constructs'; import { ResourceProvider } from '.'; import { EksBlueprintProps } from '../stacks'; @@ -130,8 +130,8 @@ export class ClusterInfo { * Constructor for ClusterInfo * @param props */ - constructor(readonly cluster: Cluster, readonly version: KubernetesVersion, - readonly nodeGroups?: Nodegroup[], readonly autoscalingGroups?: AutoScalingGroup[], readonly fargateProfiles?: FargateProfile[]) { + constructor(readonly cluster: eks.ICluster, readonly version: eks.KubernetesVersion, + readonly nodeGroups?: eks.Nodegroup[], readonly autoscalingGroups?: AutoScalingGroup[], readonly fargateProfiles?: eks.FargateProfile[]) { this.cluster = cluster; this.provisionedAddOns = new Map(); this.scheduledAddOns = new Map>(); diff --git a/lib/teams/aws-batch/aws-batch-on-eks-team.ts b/lib/teams/aws-batch/aws-batch-on-eks-team.ts index 65f2bb88b..bb0ba3464 100644 --- a/lib/teams/aws-batch/aws-batch-on-eks-team.ts +++ b/lib/teams/aws-batch/aws-batch-on-eks-team.ts @@ -74,13 +74,13 @@ export interface BatchEksTeamProps extends TeamProps { * The maximum number of Amazon EC2 vCPUs that an environment can reach. 
*/ maxvCpus: number, - + /** * List of instance types - can be a list that contains Instance Type family (i.e. "m5") or a specific Type (i.e. "m5.4xlarge") */ instanceTypes: string[], } - + /** * Name of the Job Queue */ @@ -104,7 +104,7 @@ export class BatchEksTeam extends ApplicationTeam { * @param {BatchEksTeamProps} props the Batch team definition {@link BatchEksTeamProps} */ constructor(props: BatchEksTeamProps) { - const teamProps = {...defaultProps, ...props}; + const teamProps = { ...defaultProps, ...props }; super(teamProps); this.batchTeamProps = teamProps; } @@ -130,7 +130,7 @@ export class BatchEksTeam extends ApplicationTeam { computeEnv.node.addDependency(statement); // Create a job queue - const jobQueue = new batch.CfnJobQueue(clusterInfo.cluster.stack,'batch-eks-job-queue',{ + const jobQueue = new batch.CfnJobQueue(clusterInfo.cluster.stack, 'batch-eks-job-queue', { jobQueueName: this.batchTeamProps.jobQueueName!, priority: priority, computeEnvironmentOrder: [ @@ -171,7 +171,7 @@ export class BatchEksTeam extends ApplicationTeam { const kubectlProvider = new KubectlProvider(clusterInfo); const statement = kubectlProvider.addManifest(manifestDeployment); - + return statement; } @@ -184,12 +184,12 @@ export class BatchEksTeam extends ApplicationTeam { instanceTypes: string[]; }): batch.CfnComputeEnvironment { const nodeGroups = assertEC2NodeGroup(clusterInfo, "Batch Compute Environment"); - const ngRoleNames = nodeGroups.map((ng: eks.Nodegroup | AutoScalingGroup) => {return ng.role.roleName;}); + const ngRoleNames = nodeGroups.map((ng: eks.Nodegroup | AutoScalingGroup) => { return ng.role.roleName; }); const cluster = clusterInfo.cluster; const ngRole = ngRoleNames[0]; // Need to create instance profile for the nodegroup role - const instanceProfile = new iam.CfnInstanceProfile(cluster, 'ng-role-instance-profile',{ + const instanceProfile = new iam.CfnInstanceProfile(cluster, 'ng-role-instance-profile', { instanceProfileName: ngRole, roles: [ngRole] 
}); @@ -208,7 +208,7 @@ export class BatchEksTeam extends ApplicationTeam { minvCpus: computeResources.minvCpus, maxvCpus: computeResources.maxvCpus, instanceTypes: computeResources.instanceTypes, - subnets: cluster.vpc.publicSubnets.map((e: ec2.ISubnet) => {return e.subnetId;}), + subnets: cluster.vpc.publicSubnets.map((e: ec2.ISubnet) => { return e.subnetId; }), securityGroupIds: [cluster.clusterSecurityGroupId], instanceRole: ngRole, } @@ -217,5 +217,5 @@ export class BatchEksTeam extends ApplicationTeam { batchComputeEnv.node.addDependency(instanceProfile); return batchComputeEnv; - } + } } diff --git a/lib/teams/emr-on-eks-team.ts b/lib/teams/emr-on-eks-team.ts index 716f018d1..ef11f00cc 100644 --- a/lib/teams/emr-on-eks-team.ts +++ b/lib/teams/emr-on-eks-team.ts @@ -1,4 +1,4 @@ -import { Cluster } from "aws-cdk-lib/aws-eks"; +import { ICluster } from "aws-cdk-lib/aws-eks"; import { FederatedPrincipal, IManagedPolicy, ManagedPolicy, PolicyStatement, Role } from "aws-cdk-lib/aws-iam"; import { Aws, CfnJson, CfnOutput, CfnTag } from "aws-cdk-lib"; import * as nsutils from '../utils/namespace-utils'; @@ -187,7 +187,7 @@ export class EmrEksTeam extends ApplicationTeam { * @param name Name of the IAM role * @returns Role */ - private createExecutionRole(cluster: Cluster, policy: IManagedPolicy, namespace: string, name: string): Role { + private createExecutionRole(cluster: ICluster, policy: IManagedPolicy, namespace: string, name: string): Role { const stack = cluster.stack; diff --git a/lib/teams/team.ts b/lib/teams/team.ts index 0b944319b..ff08efb22 100644 --- a/lib/teams/team.ts +++ b/lib/teams/team.ts @@ -1,11 +1,12 @@ import { CfnOutput } from 'aws-cdk-lib'; -import { KubernetesManifest, ServiceAccount } from 'aws-cdk-lib/aws-eks'; +import { Cluster, KubernetesManifest, ServiceAccount } from 'aws-cdk-lib/aws-eks'; import * as iam from 'aws-cdk-lib/aws-iam'; import { IRole } from "aws-cdk-lib/aws-iam"; import { CsiSecretProps, SecretProviderClass } from 
'../addons/secrets-store/csi-driver-provider-aws-secrets'; import { ClusterInfo, Team, Values } from '../spi'; import { applyYamlFromDir } from '../utils/yaml-utils'; import { DefaultTeamRoles } from './default-team-roles'; +import { logger } from '../utils'; /** * Team properties. @@ -115,8 +116,14 @@ export class ApplicationTeam implements Team { } protected defaultSetupAccess(clusterInfo: ClusterInfo) { - const props = this.teamProps; - const awsAuth = clusterInfo.cluster.awsAuth; + const props = this.teamProps; + + if(!(clusterInfo.cluster instanceof Cluster)) { + logger.warn(`Team ${props.name} has cluster access updates that are not supported with imported clusters` ); + return; + } + const eksCluster : Cluster = clusterInfo.cluster; + const awsAuth = eksCluster.awsAuth; const users = this.teamProps.users ?? []; const teamRole = this.getOrCreateRole(clusterInfo, users, props.userRoleArn); @@ -132,15 +139,20 @@ export class ApplicationTeam implements Team { * @param clusterInfo */ protected defaultSetupAdminAccess(clusterInfo: ClusterInfo) { - const props = this.teamProps; - const awsAuth = clusterInfo.cluster.awsAuth; + const props = this.teamProps; + + if(!(clusterInfo.cluster instanceof Cluster)) { + logger.warn(`Team ${props.name} has cluster access updates that are not supported with imported clusters` ); + return; + } const admins = this.teamProps.users ?? []; const adminRole = this.getOrCreateRole(clusterInfo, admins, props.userRoleArn); new CfnOutput(clusterInfo.cluster.stack, props.name + ' team admin ', { value: adminRole ? 
adminRole.roleArn : "none" }); if (adminRole) { - awsAuth.addMastersRole(adminRole, this.teamProps.name); + const eksCluster: Cluster = clusterInfo.cluster; + eksCluster.awsAuth.addMastersRole(adminRole, this.teamProps.name); } } diff --git a/lib/utils/cluster-utils.ts b/lib/utils/cluster-utils.ts index 0481813a3..10bfccf47 100644 --- a/lib/utils/cluster-utils.ts +++ b/lib/utils/cluster-utils.ts @@ -9,7 +9,7 @@ import { ClusterInfo } from "../spi"; const CONTROL_PLANE_LOG_TYPES = ['api','audit','authenticator','controllerManager','scheduler']; // Enables logs for the cluster. -export function setupClusterLogging(stack: Stack, cluster: eks.Cluster, enableLogTypes: string[]): void { +export function setupClusterLogging(stack: Stack, cluster: eks.ICluster, enableLogTypes: string[]): void { if(!enableLogTypes.every(val => CONTROL_PLANE_LOG_TYPES.includes(val))){ throw new Error('You have included an invalid Control Plane Log Type.'); } diff --git a/lib/utils/namespace-utils.ts b/lib/utils/namespace-utils.ts index 1266930cd..471841047 100644 --- a/lib/utils/namespace-utils.ts +++ b/lib/utils/namespace-utils.ts @@ -11,7 +11,7 @@ import { Values } from "../spi"; * @param prune * @returns KubernetesManifest */ -export function createNamespace(name: string, cluster: eks.Cluster, overwrite?: boolean, prune?: boolean, annotations?: Values, labels? : Values) { +export function createNamespace(name: string, cluster: eks.ICluster, overwrite?: boolean, prune?: boolean, annotations?: Values, labels? 
: Values) { return new KubernetesManifest(cluster.stack, `${name}-namespace-struct`, { cluster: cluster, manifest: [{ diff --git a/lib/utils/sa-utils.ts b/lib/utils/sa-utils.ts index 1d2f69d20..4ad7c078e 100644 --- a/lib/utils/sa-utils.ts +++ b/lib/utils/sa-utils.ts @@ -1,4 +1,4 @@ -import { Cluster, ServiceAccount } from "aws-cdk-lib/aws-eks"; +import { ICluster, ServiceAccount } from "aws-cdk-lib/aws-eks"; import { CfnJson, Names } from "aws-cdk-lib"; import * as eks from "aws-cdk-lib/aws-eks"; import * as iam from "aws-cdk-lib/aws-iam"; @@ -9,7 +9,7 @@ import { Construct } from 'constructs'; * @param clusterInfo * @returns sa */ -export function createServiceAccount(cluster: Cluster, name: string, namespace: string, policyDocument: iam.PolicyDocument): ServiceAccount { +export function createServiceAccount(cluster: ICluster, name: string, namespace: string, policyDocument: iam.PolicyDocument): ServiceAccount { const policy = new iam.ManagedPolicy(cluster, `${name}-managed-policy`, { document: policyDocument }); @@ -18,7 +18,7 @@ export function createServiceAccount(cluster: Cluster, name: string, namespace: } -export function createServiceAccountWithPolicy(cluster: Cluster, name: string, namespace: string, ...policies: iam.IManagedPolicy[]): ServiceAccount { +export function createServiceAccountWithPolicy(cluster: ICluster, name: string, namespace: string, ...policies: iam.IManagedPolicy[]): ServiceAccount { const sa = cluster.addServiceAccount(`${name}-sa`, { name: name, namespace: namespace diff --git a/lib/utils/yaml-utils.ts b/lib/utils/yaml-utils.ts index e7ca85ea1..48435ecd1 100644 --- a/lib/utils/yaml-utils.ts +++ b/lib/utils/yaml-utils.ts @@ -11,7 +11,7 @@ import * as yaml from 'js-yaml'; * @param cluster * @param namespaceManifest */ -export function applyYamlFromDir(dir: string, cluster: eks.Cluster, namespaceManifest: KubernetesManifest): void { +export function applyYamlFromDir(dir: string, cluster: eks.ICluster, namespaceManifest: 
KubernetesManifest): void { fs.readdirSync(dir, { encoding: 'utf8' }).forEach((file, index) => { if (file.split('.').pop() == 'yaml') { const data = fs.readFileSync(dir + file, 'utf8'); diff --git a/mkdocs.yml b/mkdocs.yml index fd23a43bf..a23424717 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -85,6 +85,7 @@ nav: - ASG Cluster Provider: 'cluster-providers/asg-cluster-provider.md' - MNG Cluster Provider: 'cluster-providers/mng-cluster-provider.md' - Fargate Cluster Provider: 'cluster-providers/fargate-cluster-provider.md' + - Import Cluster Provider: 'cluster-providers/import-cluster-provider.md' - Resource Providers: - Overview: 'resource-providers/index.md' - AMP Resource Provider: 'resource-providers/amp-provider.md' diff --git a/package.json b/package.json index a127c44e7..6d967a84a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@aws-quickstart/eks-blueprints", - "version": "1.9.1", + "version": "1.9.2", "license": "Apache-2.0", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -37,6 +37,7 @@ "@aws-cdk/lambda-layer-kubectl-v24": "^2.0.202", "@aws-cdk/lambda-layer-kubectl-v25": "^2.0.3", "@aws-cdk/lambda-layer-kubectl-v26": "^2.0.0", + "@aws-sdk/client-eks": "^3.360.0", "@aws-sdk/client-secrets-manager": "^3.316.0", "@types/assert": "^1.5.6", "@types/bcrypt": "^5.0.0",