From 5900ec24ec7201ebf17f4e1bfbceeb217f0dfdfe Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Tue, 16 Mar 2021 17:45:34 -0700 Subject: [PATCH] implemented EKS infra provisioning with CFN Signed-off-by: Sam Alba --- examples/aws-eks/README.md | 7 + .../aws-eks/cfn_template_eks_controlplane.cue | 433 ++++++++++++++++++ .../aws-eks/cfn_template_eks_nodegroup.cue | 311 +++++++++++++ examples/aws-eks/cue.mod/pkg/dagger.io | 1 + examples/aws-eks/infrastructure.cue | 43 ++ examples/aws-eks/main.cue | 21 + 6 files changed, 816 insertions(+) create mode 100644 examples/aws-eks/README.md create mode 100644 examples/aws-eks/cfn_template_eks_controlplane.cue create mode 100644 examples/aws-eks/cfn_template_eks_nodegroup.cue create mode 120000 examples/aws-eks/cue.mod/pkg/dagger.io create mode 100644 examples/aws-eks/infrastructure.cue create mode 100644 examples/aws-eks/main.cue diff --git a/examples/aws-eks/README.md b/examples/aws-eks/README.md new file mode 100644 index 00000000..867d8f4f --- /dev/null +++ b/examples/aws-eks/README.md @@ -0,0 +1,7 @@ +# Kubernetes on AWS (EKS) + +## How to run + +```sh + +``` diff --git a/examples/aws-eks/cfn_template_eks_controlplane.cue b/examples/aws-eks/cfn_template_eks_controlplane.cue new file mode 100644 index 00000000..f1d57659 --- /dev/null +++ b/examples/aws-eks/cfn_template_eks_controlplane.cue @@ -0,0 +1,433 @@ +package main + +#CFNTemplate: eksControlPlane: { + AWSTemplateFormatVersion: "2010-09-09" + Description: "Amazon EKS Sample VPC - Private and Public subnets" + Parameters: { + VpcBlock: { + Type: "String" + Default: "192.168.0.0/16" + Description: "The CIDR range for the VPC. This should be a valid private (RFC 1918) CIDR range." + } + PublicSubnet01Block: { + Type: "String" + Default: "192.168.0.0/18" + Description: "CidrBlock for public subnet 01 within the VPC" + } + PublicSubnet02Block: { + Type: "String" + Default: "192.168.64.0/18" + Description: "CidrBlock for public subnet 02 within the VPC" + } + PrivateSubnet01Block: { + Type: "String" + Default: "192.168.128.0/18" + Description: "CidrBlock for private subnet 01 within the VPC" + } + PrivateSubnet02Block: { + Type: "String" + Default: "192.168.192.0/18" + Description: "CidrBlock for private subnet 02 within the VPC" + } + ClusterName: { + Type: "String" + Description: "The EKS cluster name" + } + // EKSIAMRoleName: { + // Type: "String" + // Description: "The name of the IAM role for the EKS service to assume" + // } + } + Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [ + { + Label: default: "Worker Network Configuration" + Parameters: [ + "VpcBlock", + "PublicSubnet01Block", + "PublicSubnet02Block", + "PrivateSubnet01Block", + "PrivateSubnet02Block", + ] + }, + ] + Resources: { + VPC: { + Type: "AWS::EC2::VPC" + Properties: { + CidrBlock: Ref: "VpcBlock" + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-VPC" + }, + ] + } + } + InternetGateway: Type: "AWS::EC2::InternetGateway" + VPCGatewayAttachment: { + Type: "AWS::EC2::VPCGatewayAttachment" + Properties: { + InternetGatewayId: Ref: "InternetGateway" + VpcId: Ref: "VPC" + } + } + PublicRouteTable: { + Type: "AWS::EC2::RouteTable" + Properties: { + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Public Subnets" + }, + { + Key: "Network" + Value: "Public" + }, + ] + } + } + PrivateRouteTable01: { + Type: "AWS::EC2::RouteTable" + Properties: { + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Private Subnet AZ1" + }, + { + Key: "Network" + 
Value: "Private01" + }, + ] + } + } + PrivateRouteTable02: { + Type: "AWS::EC2::RouteTable" + Properties: { + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Private Subnet AZ2" + }, + { + Key: "Network" + Value: "Private02" + }, + ] + } + } + PublicRoute: { + DependsOn: "VPCGatewayAttachment" + Type: "AWS::EC2::Route" + Properties: { + RouteTableId: Ref: "PublicRouteTable" + DestinationCidrBlock: "0.0.0.0/0" + GatewayId: Ref: "InternetGateway" + } + } + PrivateRoute01: { + DependsOn: [ + "VPCGatewayAttachment", + "NatGateway01", + ] + Type: "AWS::EC2::Route" + Properties: { + RouteTableId: Ref: "PrivateRouteTable01" + DestinationCidrBlock: "0.0.0.0/0" + NatGatewayId: Ref: "NatGateway01" + } + } + PrivateRoute02: { + DependsOn: [ + "VPCGatewayAttachment", + "NatGateway02", + ] + Type: "AWS::EC2::Route" + Properties: { + RouteTableId: Ref: "PrivateRouteTable02" + DestinationCidrBlock: "0.0.0.0/0" + NatGatewayId: Ref: "NatGateway02" + } + } + NatGateway01: { + DependsOn: [ + "NatGatewayEIP1", + "PublicSubnet01", + "VPCGatewayAttachment", + ] + Type: "AWS::EC2::NatGateway" + Properties: { + AllocationId: "Fn::GetAtt": [ + "NatGatewayEIP1", + "AllocationId", + ] + SubnetId: Ref: "PublicSubnet01" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ1" + }, + ] + } + } + NatGateway02: { + DependsOn: [ + "NatGatewayEIP2", + "PublicSubnet02", + "VPCGatewayAttachment", + ] + Type: "AWS::EC2::NatGateway" + Properties: { + AllocationId: "Fn::GetAtt": [ + "NatGatewayEIP2", + "AllocationId", + ] + SubnetId: Ref: "PublicSubnet02" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ2" + }, + ] + } + } + NatGatewayEIP1: { + DependsOn: [ + "VPCGatewayAttachment", + ] + Type: "AWS::EC2::EIP" + Properties: Domain: "vpc" + } + NatGatewayEIP2: { + DependsOn: [ + "VPCGatewayAttachment", + ] + Type: "AWS::EC2::EIP" + Properties: Domain: "vpc" + } + PublicSubnet01: { + Type: "AWS::EC2::Subnet" + Metadata: Comment: "Subnet 01" + Properties: { + AvailabilityZone: "Fn::Select": [ + "0", + { + "Fn::GetAZs": Ref: "AWS::Region" + }, + ] + CidrBlock: Ref: "PublicSubnet01Block" + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet01" + }, + ] + } + } + PublicSubnet02: { + Type: "AWS::EC2::Subnet" + Metadata: Comment: "Subnet 02" + Properties: { + AvailabilityZone: "Fn::Select": [ + "1", + { + "Fn::GetAZs": Ref: "AWS::Region" + }, + ] + CidrBlock: Ref: "PublicSubnet02Block" + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet02" + }, + ] + } + } + PrivateSubnet01: { + Type: "AWS::EC2::Subnet" + Metadata: Comment: "Subnet 03" + Properties: { + AvailabilityZone: "Fn::Select": [ + "0", + { + "Fn::GetAZs": Ref: "AWS::Region" + }, + ] + CidrBlock: Ref: "PrivateSubnet01Block" + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet01" + }, + ] + } + } + PrivateSubnet02: { + Type: "AWS::EC2::Subnet" + Metadata: Comment: "Private Subnet 02" + Properties: { + AvailabilityZone: "Fn::Select": [ + "1", + { + "Fn::GetAZs": Ref: "AWS::Region" + }, + ] + CidrBlock: Ref: "PrivateSubnet02Block" + VpcId: Ref: "VPC" + Tags: [ + { + Key: "Name" + Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet02" + }, + ] + } + } + PublicSubnet01RouteTableAssociation: { + Type: "AWS::EC2::SubnetRouteTableAssociation" + Properties: { + SubnetId: Ref: "PublicSubnet01" + RouteTableId: Ref: "PublicRouteTable" + } + } + PublicSubnet02RouteTableAssociation: { + Type: 
"AWS::EC2::SubnetRouteTableAssociation" + Properties: { + SubnetId: Ref: "PublicSubnet02" + RouteTableId: Ref: "PublicRouteTable" + } + } + PrivateSubnet01RouteTableAssociation: { + Type: "AWS::EC2::SubnetRouteTableAssociation" + Properties: { + SubnetId: Ref: "PrivateSubnet01" + RouteTableId: Ref: "PrivateRouteTable01" + } + } + PrivateSubnet02RouteTableAssociation: { + Type: "AWS::EC2::SubnetRouteTableAssociation" + Properties: { + SubnetId: Ref: "PrivateSubnet02" + RouteTableId: Ref: "PrivateRouteTable02" + } + } + ControlPlaneSecurityGroup: { + Type: "AWS::EC2::SecurityGroup" + Properties: { + GroupDescription: "Cluster communication with worker nodes" + VpcId: Ref: "VPC" + } + } + EKSIAMRole: { + Type: "AWS::IAM::Role" + Properties: { + AssumeRolePolicyDocument: Statement: [ + { + Effect: "Allow" + Principal: Service: [ + "eks.amazonaws.com", + ] + Action: [ + "sts:AssumeRole", + ] + + }, + ] + // RoleName: Ref: "EKSIAMRoleName" + ManagedPolicyArns: [ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", + ] + } + } + EKSCluster: { + Type: "AWS::EKS::Cluster" + Properties: { + Name: Ref: "ClusterName" + RoleArn: "Fn::GetAtt": ["EKSIAMRole", "Arn"] + ResourcesVpcConfig: { + SecurityGroupIds: [{Ref: "ControlPlaneSecurityGroup"}] + SubnetIds: [ + {Ref: "PublicSubnet01"}, + {Ref: "PublicSubnet02"}, + {Ref: "PrivateSubnet01"}, + {Ref: "PrivateSubnet02"}, + ] + } + } + DependsOn: ["EKSIAMRole", "PublicSubnet01", "PublicSubnet02", "PrivateSubnet01", "PrivateSubnet02", "ControlPlaneSecurityGroup"] + } + } + Outputs: { + SubnetIds: { + Description: "Subnets IDs in the VPC" + Value: "Fn::Join": [ + ",", + [ + { + Ref: "PublicSubnet01" + }, + { + Ref: "PublicSubnet02" + }, + { + Ref: "PrivateSubnet01" + }, + { + Ref: "PrivateSubnet02" + }, + ], + ] + } + PublicSubnets: { + Description: "List of the public subnets" + Value: "Fn::Join": [ + ",", + [ + { + Ref: "PublicSubnet01" + }, + { + Ref: "PublicSubnet02" + }, + ], + ] + } + PrivateSubnets: { + Description: "List of the private subnets" + Value: "Fn::Join": [ + ",", + [ + { + Ref: "PrivateSubnet01" + }, + { + Ref: "PrivateSubnet02" + }, + ], + ] + } + DefaultSecurityGroup: { + Description: "Security group for the cluster control plane communication with worker nodes" + Value: "Fn::Join": [ + ",", + [ + { + Ref: "ControlPlaneSecurityGroup" + }, + ], + ] + } + VPC: { + Description: "The VPC Id" + Value: Ref: "VPC" + } + } +} diff --git a/examples/aws-eks/cfn_template_eks_nodegroup.cue b/examples/aws-eks/cfn_template_eks_nodegroup.cue new file mode 100644 index 00000000..528f71c6 --- /dev/null +++ b/examples/aws-eks/cfn_template_eks_nodegroup.cue @@ -0,0 +1,311 @@ +package main + +#CFNTemplate: eksNodeGroup: { + AWSTemplateFormatVersion: "2010-09-09" + Description: "Amazon EKS - Node Group" + Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [ + { + Label: default: "EKS Cluster" + Parameters: [ + "ClusterName", + "ClusterControlPlaneSecurityGroup", + ] + }, + { + Label: default: "Worker Node Configuration" + Parameters: [ + "NodeGroupName", + "NodeAutoScalingGroupMinSize", + "NodeAutoScalingGroupDesiredCapacity", + "NodeAutoScalingGroupMaxSize", + "NodeInstanceType", + "NodeImageIdSSMParam", + "NodeImageId", + "NodeVolumeSize", + // "KeyName", + "BootstrapArguments", + ] + }, + { + Label: default: "Worker Network Configuration" + Parameters: [ + "VpcId", + "Subnets", + ] + }, + ] + Parameters: { + BootstrapArguments: { + Type: "String" + Default: "" + Description: "Arguments to pass to 
the bootstrap script. See files/bootstrap.sh in https://github.com/awslabs/amazon-eks-ami" + } + ClusterControlPlaneSecurityGroup: { + Type: "AWS::EC2::SecurityGroup::Id" + Description: "The security group of the cluster control plane." + } + ClusterName: { + Type: "String" + Description: "The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster." + } + // KeyName: { + // Type: "AWS::EC2::KeyPair::KeyName" + // Description: "The EC2 Key Pair to allow SSH access to the instances" + // } + NodeAutoScalingGroupDesiredCapacity: { + Type: "Number" + Default: 3 + Description: "Desired capacity of Node Group ASG." + } + NodeAutoScalingGroupMaxSize: { + Type: "Number" + Default: 4 + Description: "Maximum size of Node Group ASG. Set to at least 1 greater than NodeAutoScalingGroupDesiredCapacity." + } + NodeAutoScalingGroupMinSize: { + Type: "Number" + Default: 1 + Description: "Minimum size of Node Group ASG." + } + NodeGroupName: { + Type: "String" + Description: "Unique identifier for the Node Group." + } + NodeImageId: { + Type: "String" + Default: "" + Description: "(Optional) Specify your own custom image ID. This value overrides any AWS Systems Manager Parameter Store value specified above." + } + NodeImageIdSSMParam: { + Type: "AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>" + Default: "/aws/service/eks/optimized-ami/1.19/amazon-linux-2/recommended/image_id" + Description: "AWS Systems Manager Parameter Store parameter of the AMI ID for the worker node instances." + } + NodeInstanceType: { + Type: "String" + Default: "t3.medium" + ConstraintDescription: "Must be a valid EC2 instance type" + Description: "EC2 instance type for the node instances" + } + NodeVolumeSize: { + Type: "Number" + Default: 20 + Description: "Node volume size" + } + Subnets: { + Type: "List<AWS::EC2::Subnet::Id>" + Description: "The subnets where workers can be created."
+ } + VpcId: { + Type: "AWS::EC2::VPC::Id" + Description: "The VPC of the worker instances" + } + } + Conditions: HasNodeImageId: "Fn::Not": [ + { + "Fn::Equals": [ + { + Ref: "NodeImageId" + }, + "", + ] + }, + ] + Resources: { + NodeInstanceRole: { + Type: "AWS::IAM::Role" + Properties: { + AssumeRolePolicyDocument: { + Version: "2012-10-17" + Statement: [ + { + Effect: "Allow" + Principal: Service: [ + "ec2.amazonaws.com", + ] + Action: [ + "sts:AssumeRole", + ] + }, + ] + } + ManagedPolicyArns: [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + ] + Path: "/" + } + } + NodeInstanceProfile: { + Type: "AWS::IAM::InstanceProfile" + Properties: { + Path: "/" + Roles: [ + { + Ref: "NodeInstanceRole" + }, + ] + } + } + NodeSecurityGroup: { + Type: "AWS::EC2::SecurityGroup" + Properties: { + GroupDescription: "Security group for all nodes in the cluster" + Tags: [ + { + Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}" + Value: "owned" + }, + ] + VpcId: Ref: "VpcId" + } + } + NodeSecurityGroupIngress: { + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow node to communicate with each other" + FromPort: 0 + GroupId: Ref: "NodeSecurityGroup" + IpProtocol: "-1" + SourceSecurityGroupId: Ref: "NodeSecurityGroup" + ToPort: 65535 + } + } + ClusterControlPlaneSecurityGroupIngress: { + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow pods to communicate with the cluster API Server" + FromPort: 443 + GroupId: Ref: "ClusterControlPlaneSecurityGroup" + IpProtocol: "tcp" + SourceSecurityGroupId: Ref: "NodeSecurityGroup" + ToPort: 443 + } + } + ControlPlaneEgressToNodeSecurityGroup: { + Type: "AWS::EC2::SecurityGroupEgress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow the cluster control plane to communicate with worker Kubelet and pods" + DestinationSecurityGroupId: Ref: "NodeSecurityGroup" + FromPort: 1025 + GroupId: Ref: "ClusterControlPlaneSecurityGroup" + IpProtocol: "tcp" + ToPort: 65535 + } + } + ControlPlaneEgressToNodeSecurityGroupOn443: { + Type: "AWS::EC2::SecurityGroupEgress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow the cluster control plane to communicate with pods running extension API servers on port 443" + DestinationSecurityGroupId: Ref: "NodeSecurityGroup" + FromPort: 443 + GroupId: Ref: "ClusterControlPlaneSecurityGroup" + IpProtocol: "tcp" + ToPort: 443 + } + } + NodeSecurityGroupFromControlPlaneIngress: { + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow worker Kubelets and pods to receive communication from the cluster control plane" + FromPort: 1025 + GroupId: Ref: "NodeSecurityGroup" + IpProtocol: "tcp" + SourceSecurityGroupId: Ref: "ClusterControlPlaneSecurityGroup" + ToPort: 65535 + } + } + NodeSecurityGroupFromControlPlaneOn443Ingress: { + Type: "AWS::EC2::SecurityGroupIngress" + DependsOn: "NodeSecurityGroup" + Properties: { + Description: "Allow pods running extension API servers on port 443 to receive communication from cluster control plane" + FromPort: 443 + GroupId: Ref: "NodeSecurityGroup" + IpProtocol: "tcp" + SourceSecurityGroupId: Ref: "ClusterControlPlaneSecurityGroup" + ToPort: 443 + } + } + NodeLaunchConfig: { + Type: "AWS::AutoScaling::LaunchConfiguration" + Properties: { + AssociatePublicIpAddress: "true" + 
BlockDeviceMappings: [ + { + DeviceName: "/dev/xvda" + Ebs: { + DeleteOnTermination: true + VolumeSize: Ref: "NodeVolumeSize" + VolumeType: "gp2" + } + }, + ] + IamInstanceProfile: Ref: "NodeInstanceProfile" + ImageId: "Fn::If": [ + "HasNodeImageId", + { + Ref: "NodeImageId" + }, + { + Ref: "NodeImageIdSSMParam" + }, + ] + InstanceType: Ref: "NodeInstanceType" + // KeyName: Ref: "KeyName" + SecurityGroups: [ + { + Ref: "NodeSecurityGroup" + }, + ] + UserData: "Fn::Base64": "Fn::Sub": "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}\n/opt/aws/bin/cfn-signal --exit-code $? \\\n --stack ${AWS::StackName} \\\n --resource NodeGroup \\\n --region ${AWS::Region}\n" + } + } + NodeGroup: { + Type: "AWS::AutoScaling::AutoScalingGroup" + Properties: { + DesiredCapacity: Ref: "NodeAutoScalingGroupDesiredCapacity" + LaunchConfigurationName: Ref: "NodeLaunchConfig" + MaxSize: Ref: "NodeAutoScalingGroupMaxSize" + MinSize: Ref: "NodeAutoScalingGroupMinSize" + Tags: [ + { + Key: "Name" + PropagateAtLaunch: "true" + Value: "Fn::Sub": "${ClusterName}-${NodeGroupName}-Node" + }, + { + Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}" + PropagateAtLaunch: "true" + Value: "owned" + }, + ] + VPCZoneIdentifier: Ref: "Subnets" + } + UpdatePolicy: AutoScalingRollingUpdate: { + MaxBatchSize: "1" + MinInstancesInService: Ref: "NodeAutoScalingGroupDesiredCapacity" + PauseTime: "PT5M" + } + } + } + Outputs: { + NodeInstanceRole: { + Description: "The node instance role" + Value: "Fn::GetAtt": [ + "NodeInstanceRole", + "Arn", + ] + } + NodeSecurityGroup: { + Description: "The security group for the node group" + Value: Ref: "NodeSecurityGroup" + } + } +} diff --git a/examples/aws-eks/cue.mod/pkg/dagger.io b/examples/aws-eks/cue.mod/pkg/dagger.io new file mode 120000 index 00000000..1aafa4de --- /dev/null +++ b/examples/aws-eks/cue.mod/pkg/dagger.io @@ -0,0 +1 @@ +../../../../stdlib \ No newline at end of file diff --git a/examples/aws-eks/infrastructure.cue b/examples/aws-eks/infrastructure.cue new file mode 100644 index 00000000..1083e9a2 --- /dev/null +++ b/examples/aws-eks/infrastructure.cue @@ -0,0 +1,43 @@ +package main + +import ( + "encoding/json" + + "dagger.io/aws" + "dagger.io/aws/cloudformation" +) + +#Infrastructure: { + awsConfig: aws.#Config + namePrefix: *"dagger-example-" | string + // Cluster size is 1 for example (to limit resources) + workerNodeCapacity: *1 | >1 + workerNodeInstanceType: *"t3.small" | string + + let clusterName = "\(namePrefix)eks-cluster" + + eksControlPlane: cloudformation.#Stack & { + config: awsConfig + source: json.Marshal(#CFNTemplate.eksControlPlane) + stackName: "\(namePrefix)eks-controlplane" + neverUpdate: true + parameters: ClusterName: clusterName + } + + eksNodeGroup: cloudformation.#Stack & { + config: awsConfig + source: json.Marshal(#CFNTemplate.eksNodeGroup) + stackName: "\(namePrefix)eks-nodegroup" + neverUpdate: true + parameters: { + ClusterName: clusterName + ClusterControlPlaneSecurityGroup: eksControlPlane.outputs.DefaultSecurityGroup + NodeAutoScalingGroupDesiredCapacity: 1 + NodeAutoScalingGroupMaxSize: NodeAutoScalingGroupDesiredCapacity + 1 + NodeGroupName: "\(namePrefix)eks-nodegroup" + NodeInstanceType: workerNodeInstanceType + VpcId: eksControlPlane.outputs.VPC + Subnets: eksControlPlane.outputs.SubnetIds + } + } +} diff --git a/examples/aws-eks/main.cue b/examples/aws-eks/main.cue new file mode 100644 index 00000000..d126a729 --- /dev/null +++ b/examples/aws-eks/main.cue @@ -0,0 +1,21 @@ +package main + +import 
( + "dagger.io/aws" +) + +// Fill using: +// --input-string awsConfig.accessKey=XXX +// --input-string awsConfig.secretKey=XXX +awsConfig: aws.#Config & { + region: *"us-east-2" | string +} + +// Auto-provision an EKS cluster: +// - VPC, Nat Gateways, Subnets, Security Group +// - EKS Cluster +// - Instance Node Group: auto-scaling-group, ec2 instances, etc... +// base config can be changed (number of EC2 instances, types, etc...) +infra: #Infrastructure & { + "awsConfig": awsConfig +}
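
Usage note on the new `#Infrastructure` definition: as the comments in `main.cue` indicate, AWS credentials are supplied at run time with `--input-string awsConfig.accessKey=XXX` and `--input-string awsConfig.secretKey=XXX`, and the base configuration is overridable because each knob is declared with a CUE default (`*value | type`). The sketch below shows a `main.cue` variant that overrides those defaults; the field names come from this patch, the concrete values are illustrative only, and note that `workerNodeCapacity` is declared but not yet wired into the node group parameters (the stack currently hard-codes `NodeAutoScalingGroupDesiredCapacity: 1`).

```cue
package main

import (
	"dagger.io/aws"
)

// Credentials are still provided at run time:
//   --input-string awsConfig.accessKey=XXX
//   --input-string awsConfig.secretKey=XXX
awsConfig: aws.#Config & {
	region: *"us-east-2" | string
}

// Same layout as main.cue in this patch, with the optional knobs overridden.
infra: #Infrastructure & {
	"awsConfig": awsConfig
	// Prefix for the CloudFormation stack names (default "dagger-example-").
	namePrefix: "my-team-"
	// Worker instance type (default "t3.small").
	workerNodeInstanceType: "t3.medium"
}
```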