cue modules: move stdlib to pkg/alpha.dagger.io

In preparation for Europa, we will vendor multiple CUE modules (import sketch below):

- `pkg/alpha.dagger.io`: legacy non-Europa packages
- `pkg/dagger.io`: core Europa packages
- `pkg/universe.dagger.io`: Europa universe
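
Consumers select a module purely by import prefix. A minimal sketch (illustrative only, using the legacy module; the Europa paths are shown commented out):

package main

import (
    // Legacy stdlib, vendored under pkg/alpha.dagger.io
    "alpha.dagger.io/dagger"
    // Europa equivalents will resolve from pkg/dagger.io and
    // pkg/universe.dagger.io, e.g.:
    //   "dagger.io/dagger"
    //   "universe.dagger.io/alpine"
)

// A legacy-style input, resolved from the vendored alpha module
region: dagger.#Input & {string}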

Signed-off-by: Andrea Luzzardi <aluzzardi@gmail.com>
Andrea Luzzardi
2022-01-11 12:40:02 -08:00
parent e5316f3a1e
commit 282759c0e5
277 changed files with 33 additions and 31 deletions

@@ -0,0 +1,151 @@
// AWS base package
package aws
import (
"regexp"
"alpha.dagger.io/dagger"
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/alpine"
)
// AWS Config shared by all AWS packages
#Config: {
// AWS region
region: dagger.#Input & {string}
// AWS access key
accessKey: dagger.#Input & {dagger.#Secret}
// AWS secret key
secretKey: dagger.#Input & {dagger.#Secret}
// AWS localstack mode
localMode: dagger.#Input & {*false | bool}
}
// Configuration specific to CLI v1
#V1: {
config: #Config
package: [string]: string | bool
version: dagger.#Input & {*"1.19" | string}
#up: [
op.#Load & {
from: alpine.#Image & {
"package": package
"package": bash: true
"package": jq: true
"package": curl: true
"package": "aws-cli": "=~\( version )"
if config.localMode != false {
package: "py3-pip": true
}
}
},
]
}
// Configuration specific to CLI v2
#V2: {
config: #Config
package: [string]: string | bool
version: dagger.#Input & {*"2.1.29" | string}
#up: [
op.#Load & {
from: alpine.#Image & {
"package": package
"package": bash: true
"package": jq: true
"package": curl: true
"package": binutils: true
if config.localMode != false {
package: "py3-pip": true
}
}
},
// Install the AWS CLI v2 on Alpine via the glibc compatibility packages: https://stackoverflow.com/a/61268529
op.#Exec & {
env: AWS_CLI_VERSION: version
args: ["/bin/bash", "--noprofile", "--norc", "-eo", "pipefail", "-c",
#"""
curl -sL https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub -o /etc/apk/keys/sgerrand.rsa.pub
curl -sLO https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.31-r0/glibc-2.31-r0.apk
curl -sLO https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.31-r0/glibc-bin-2.31-r0.apk
curl -sLO https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.31-r0/glibc-i18n-2.31-r0.apk
apk add --no-cache glibc-2.31-r0.apk glibc-bin-2.31-r0.apk glibc-i18n-2.31-r0.apk
/usr/glibc-compat/bin/localedef -i en_US -f UTF-8 en_US.UTF-8
curl -s https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${AWS_CLI_VERSION}.zip -o awscliv2.zip
unzip awscliv2.zip > /dev/null
./aws/install
rm -rf awscliv2.zip aws /usr/local/aws-cli/v2/*/dist/aws_completer /usr/local/aws-cli/v2/*/dist/awscli/data/ac.index \
/usr/local/aws-cli/v2/*/dist/awscli/examples glibc-*.apk
"""#]
},
]
}
#CLI: {
config: #Config
package: [string]: string | bool
version: dagger.#Input & {*"1.19" | string}
_isV2: regexp.Match("^2.*$", version)
#up: [
op.#Load & {
if _isV2 == false {
from: #V1 & {
"config": config
"package": package
"version": version
}
}
if _isV2 == true {
from: #V2 & {
"config": config
"package": package
"version": version
}
}
},
op.#Exec & {
if config.localMode == false {
args: ["/bin/bash", "--noprofile", "--norc", "-eo", "pipefail", "-c",
#"""
aws configure set aws_access_key_id "$(cat /run/secrets/access_key)"
aws configure set aws_secret_access_key "$(cat /run/secrets/secret_key)"
aws configure set default.region "$AWS_DEFAULT_REGION"
aws configure set default.cli_pager ""
aws configure set default.output "json"
"""#]
}
if config.localMode == true {
args: [ "/bin/bash", "--noprofile", "--norc", "-eo", "pipefail", "-c",
#"""
# Install awscli-local and override the aws binary
pip install awscli-local==0.14
mv /usr/bin/awslocal /usr/bin/aws
# Configure
mkdir -p ~/.aws/
# Set up ~/.aws/config
echo "[default]" > ~/.aws/config
echo "region = $AWS_DEFAULT_REGION" >> ~/.aws/config
echo "cli_pager =" >> ~/.aws/config
echo "output = json" >> ~/.aws/config
# Set up ~/.aws/credentials
echo "[default]" > ~/.aws/credentials
echo "aws_access_key_id = $(cat /run/secrets/access_key)" >> ~/.aws/credentials
echo "aws_secret_access_key = $(cat /run/secrets/secret_key)" >> ~/.aws/credentials
"""#]
}
mount: "/run/secrets/access_key": secret: config.accessKey
mount: "/run/secrets/secret_key": secret: config.secretKey
env: AWS_DEFAULT_REGION: config.region
},
]
}
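
Usage sketch for #CLI (illustrative; the region and version values are placeholders, and the op.#Exec step is a made-up smoke test):

package example

import (
    "alpha.dagger.io/aws"
    "alpha.dagger.io/dagger/op"
)

// Shared AWS configuration; accessKey/secretKey stay unresolved inputs here
cfg: aws.#Config & {region: "us-east-1"} // placeholder

// #CLI routes to #V1 or #V2 based on the version prefix
smokeTest: #up: [
    op.#Load & {
        from: aws.#CLI & {
            config:  cfg
            version: "2.1.29"
        }
    },
    op.#Exec & {
        args: ["aws", "--version"]
    },
]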

@@ -0,0 +1,94 @@
// AWS CloudFormation
package cloudformation
import (
"encoding/json"
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/aws"
)
// AWS CloudFormation Stack
#Stack: {
// AWS Config
config: aws.#Config
// Source is the Cloudformation template (JSON/YAML string)
source: string @dagger(input)
// StackName is the CloudFormation stack name
stackName: string @dagger(input)
// Stack parameters
parameters: {
...
} @dagger(input)
// Behavior on failure to create/update the stack
onFailure: *"DO_NOTHING" | "ROLLBACK" | "DELETE" @dagger(input)
// Maximum waiting time until stack creation/update (in minutes)
timeout: *10 | uint @dagger(input)
// Never update the stack if it already exists
neverUpdate: *false | true @dagger(input)
#files: {
"/entrypoint.sh": #Code
"/src/template.json": source
if len(parameters) > 0 {
"/src/parameters.json": json.Marshal(
[ for key, val in parameters {
ParameterKey: "\(key)"
ParameterValue: "\(val)"
}])
"/src/parameters_overrides.json": json.Marshal([ for key, val in parameters {"\(key)=\(val)"}])
}
}
outputs: {
[string]: string @dagger(output)
}
outputs: #up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Mkdir & {
path: "/src"
},
for dest, content in #files {
op.#WriteFile & {
"dest": dest
"content": content
}
},
op.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"/entrypoint.sh",
]
env: {
if neverUpdate {
NEVER_UPDATE: "true"
}
STACK_NAME: stackName
TIMEOUT: "\(timeout)"
ON_FAILURE: onFailure
}
dir: "/src"
},
op.#Export & {
source: "/outputs.json"
format: "json"
},
]
}
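
Usage sketch for #Stack (illustrative; the template body, stack name, and region below are placeholders):

package example

import (
    "alpha.dagger.io/aws"
    "alpha.dagger.io/aws/cloudformation"
)

stack: cloudformation.#Stack & {
    config: aws.#Config & {region: "us-east-1"} // placeholder
    // Placeholder template body; a real template declares Resources
    source:    #"{"Resources": {}}"#
    stackName: "dagger-example" // placeholder
    onFailure: "ROLLBACK"
    timeout:   20
}

// Stack outputs surface as string fields under stack.outputs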

@@ -0,0 +1,108 @@
package cloudformation
#Code: #"""
set +o pipefail
aws cloudformation validate-template --template-body file:///src/template.json
parameters=""
function getOutputs() {
aws cloudformation describe-stacks \
--stack-name "$STACK_NAME" \
--query 'Stacks[].Outputs' \
--output json \
| jq '.[] | map( { (.OutputKey|tostring): .OutputValue } ) | add' \
> /outputs.json
}
# Check if the stack exists
aws cloudformation describe-stacks --stack-name "$STACK_NAME" 2>/dev/null || {
if [ -f /src/parameters.json ]; then
parameters="--parameters file:///src/parameters.json"
cat /src/parameters.json
fi
aws cloudformation create-stack \
--stack-name "$STACK_NAME" \
--template-body "file:///src/template.json" \
--capabilities CAPABILITY_IAM \
--on-failure "$ON_FAILURE" \
--timeout-in-minutes "$TIMEOUT" \
$parameters \
|| {
# Create failed, display errors
aws cloudformation describe-stack-events \
--stack-name "$STACK_NAME" \
--max-items 10 \
| >&2 jq '.StackEvents[] | select((.ResourceStatus | contains("FAILED")) or (.ResourceStatus | contains("ERROR"))) | ("===> ERROR: " + .LogicalResourceId + ": " + .ResourceStatusReason)'
exit 1
}
aws cloudformation wait stack-create-complete \
--stack-name "$STACK_NAME"
getOutputs
exit 0
}
# In case there is an action already in progress, we wait for the corresponding action to complete
wait_action=""
stack_status=$(aws cloudformation describe-stacks --stack-name "$STACK_NAME" | jq -r '.Stacks[].StackStatus')
case "$stack_status" in
"CREATE_FAILED")
echo "Deleting previous failed stack..."
aws cloudformation delete-stack --stack-name "$STACK_NAME"
aws cloudformation wait stack-delete-complete --stack-name "$STACK_NAME" || true
;;
"CREATE_IN_PROGRESS")
echo "Stack create already in progress, waiting..."
aws cloudformation wait stack-create-complete --stack-name "$STACK_NAME" || true
;;
"UPDATE_IN_PROGRESS")
# Cancel update to avoid stacks stuck in deadlock (re-apply then works)
echo "Stack update already in progress, waiting..."
aws cloudformation cancel-update-stack --stack-name "$STACK_NAME" || true
;;
"ROLLBACK_IN_PROGRESS")
echo "Stack rollback already in progress, waiting..."
aws cloudformation wait stack-rollback-complete --stack-name "$STACK_NAME" || true
;;
"DELETE_IN_PROGRESS")
echo "Stack delete already in progress, waiting..."
aws cloudformation wait stack-delete-complete --stack-name "$STACK_NAME" || true
;;
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS")
echo "Stack update almost completed, waiting..."
aws cloudformation wait stack-update-complete --stack-name "$STACK_NAME" || true
;;
esac
[ -n "$NEVER_UPDATE" ] && {
getOutputs
exit 0
}
# Stack exists, trigger an update via `deploy`
if [ -f /src/parameters_overrides.json ]; then
parameters="--parameter-overrides file:///src/parameters_overrides.json"
cat /src/parameters_overrides.json
fi
echo "Deploying stack $STACK_NAME"
aws cloudformation deploy \
--stack-name "$STACK_NAME" \
--template-file "/src/template.json" \
--capabilities CAPABILITY_IAM \
--no-fail-on-empty-changeset \
$parameters \
|| {
# Deploy failed, display errors
echo "Failed to deploy stack $STACK_NAME"
aws cloudformation describe-stack-events \
--stack-name "$STACK_NAME" \
--max-items 10 \
| >&2 jq '.StackEvents[] | select((.ResourceStatus | contains("FAILED")) or (.ResourceStatus | contains("ERROR"))) | ("===> ERROR: " + .LogicalResourceId + ": " + .ResourceStatusReason)'
exit 1
}
getOutputs
"""#

@@ -0,0 +1,32 @@
// Amazon Elastic Container Registry (ECR)
package ecr
import (
"alpha.dagger.io/aws"
"alpha.dagger.io/os"
)
// Convert ECR credentials to Docker Login format
#Credentials: {
// AWS Config
config: aws.#Config
// ECR registry username (always "AWS")
username: "AWS" @dagger(output)
ctr: os.#Container & {
image: aws.#CLI & {
"config": config
}
always: true
command: "aws ecr get-login-password > /out"
}
// ECR registry secret
secret: {
os.#File & {
from: ctr
path: "/out"
}
}.contents @dagger(output)
}

@@ -0,0 +1,95 @@
package ecr
import (
"alpha.dagger.io/aws"
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/random"
)
TestConfig: awsConfig: aws.#Config & {
region: "us-east-2"
}
TestECR: {
localMode: TestConfig.awsConfig.localMode
suffix: random.#String & {
seed: ""
}
repository: string
if localMode == false {
repository: "125635003186.dkr.ecr.\(TestConfig.awsConfig.region).amazonaws.com/dagger-ci"
}
if localMode == true {
repository: "localhost:4510/dagger-ci"
}
tag: "test-ecr-\(suffix.out)"
creds: #Credentials & {
config: TestConfig.awsConfig
}
push: {
ref: "\(repository):\(tag)"
#up: [
op.#DockerBuild & {
dockerfile: """
FROM alpine
RUN echo \(suffix.out) > /test
"""
},
op.#DockerLogin & {
target: repository
username: creds.username
secret: creds.secret
},
op.#PushContainer & {
"ref": ref
},
]
}
pull: #up: [
op.#DockerLogin & {
target: push.ref
username: creds.username
secret: creds.secret
},
op.#FetchContainer & {
ref: push.ref
},
]
verify: #up: [
op.#Load & {
from: pull
},
op.#Exec & {
always: true
args: [
"sh", "-c", "test $(cat test) = \(suffix.out)",
]
},
]
verifyBuild: #up: [
op.#DockerLogin & {
target: push.ref
username: creds.username
secret: creds.secret
},
op.#DockerBuild & {
dockerfile: #"""
FROM \#(push.ref)
RUN test $(cat /test) = \#(suffix.out)
"""#
},
]
}

@@ -0,0 +1,70 @@
// AWS Elastic Container Service (ECS)
package ecs
import (
"encoding/json"
"alpha.dagger.io/aws"
)
// Task implements ecs run-task for running a single container on ECS
#Task: {
// AWS Config
config: aws.#Config
// ECS cluster name
cluster: string @dagger(input)
// Arn of the task to run
taskArn: string @dagger(input)
// Environment variables of the task
containerEnvironment: {
[string]: string @dagger(input)
}
// Container name
containerName: string @dagger(input)
// Container command to run
containerCommand: [...string] @dagger(input)
// Task role ARN
roleArn: string | *"" @dagger(input)
containerOverrides: {
containerOverrides: [{
name: containerName
if len(containerCommand) > 0 {
command: containerCommand
}
if len(containerEnvironment) > 0 {
environment: [ for k, v in containerEnvironment {
name: k
value: v
}]
}
}]
if roleArn != "" {
taskRoleArn: roleArn
}
}
aws.#Script & {
"config": config
export: "/out"
files: {
"/inputs/cluster": cluster
"/inputs/task_arn": taskArn
"/inputs/container_overrides": containerOverrides
}
code: #"""
cat /inputs/container_overrides | jq
aws ecs run-task \
--cluster "$(cat /inputs/cluster)" \
--task-definition "$(cat /inputs/task_arn)" \
--overrides "$(cat /inputs/container_overrides)" \
> /out
"""#
}
}
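
Usage sketch for #Task (illustrative; the cluster, task definition, and container values are placeholders):

package example

import (
    "alpha.dagger.io/aws"
    "alpha.dagger.io/aws/ecs"
)

runTask: ecs.#Task & {
    config:        aws.#Config & {region: "us-east-1"} // placeholder
    cluster:       "sample-cluster"                    // placeholder
    taskArn:       "sample-task"                       // placeholder: family, family:revision, or full ARN
    containerName: "app"
    containerCommand: ["echo", "hello"]
    containerEnvironment: APP_ENV: "ci"
}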

@@ -0,0 +1,27 @@
package eks
#Code: #"""
[ -e /cache/bin/kubectl ] || {
curl -sfL https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -o /cache/bin/kubectl \
&& chmod +x /cache/bin/kubectl
}
export KUBECONFIG=/kubeconfig
export PATH="$PATH:/cache/bin"
# Generate a kube configuration
aws eks update-kubeconfig --name "$EKS_CLUSTER"
# Figure out the kubernetes username
CONTEXT="$(kubectl config current-context)"
USER="$(kubectl config view -o json | \
jq -r ".contexts[] | select(.name==\"$CONTEXT\") | .context.user")"
# Grab a kubernetes access token
ACCESS_TOKEN="$(aws eks get-token --cluster-name "$EKS_CLUSTER" | \
jq -r .status.token)"
# Remove the user config and replace it with the token
kubectl config unset "users.${USER}"
kubectl config set-credentials "$USER" --token "$ACCESS_TOKEN"
"""#

@@ -0,0 +1,61 @@
// AWS Elastic Kubernetes Service (EKS)
package eks
import (
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/aws"
)
// #KubeConfig outputs a valid kubeconfig for the kubectl client
#KubeConfig: {
// AWS Config
config: aws.#Config
// EKS cluster name
clusterName: string @dagger(input)
// Kubectl version
version: *"v1.19.9" | string @dagger(input)
// kubeconfig is the generated kube configuration file
kubeconfig: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#WriteFile & {
dest: "/entrypoint.sh"
content: #Code
},
op.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"/entrypoint.sh",
]
env: {
EKS_CLUSTER: clusterName
KUBECTL_VERSION: version
}
mount: {
"/cache/aws": "cache"
"/cache/bin": "cache"
}
},
op.#Export & {
source: "/kubeconfig"
format: "string"
},
]
} @dagger(output)
}

@@ -0,0 +1,52 @@
package eks
import (
"alpha.dagger.io/aws"
"alpha.dagger.io/kubernetes"
"alpha.dagger.io/dagger/op"
)
TestConfig: awsConfig: aws.#Config & {
region: "us-east-2"
}
TestCluster: #KubeConfig & {
config: TestConfig.awsConfig
clusterName: *"dagger-example-eks-cluster" | string
}
TestEks: {
#GetPods:
"""
kubectl get pods -A
"""
#up: [
op.#Load & {
from: kubernetes.#Kubectl
},
op.#WriteFile & {
dest: "/kubeconfig"
content: TestCluster.kubeconfig
},
op.#WriteFile & {
dest: "/getPods.sh"
content: #GetPods
},
op.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"/getPods.sh",
]
env: KUBECONFIG: "/kubeconfig"
},
]
}

@@ -0,0 +1,85 @@
// AWS Elastic Load Balancer (ELBv2)
package elb
import (
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/aws"
)
// Returns an unused rule priority (randomized in available range)
#RandomRulePriority: {
// AWS Config
config: aws.#Config
// ListenerArn
listenerArn: string @dagger(input)
// Optional vhost for reusing priorities
vhost?: string @dagger(input)
// Exported priority
priority: out @dagger(output)
out: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
#"""
if [ -s "$VHOST" ]; then
# We passed a vhost as input, try to recycle priority from previously allocated vhost
priority=$(aws elbv2 describe-rules \
--listener-arn "$LISTENER_ARN" | \
jq -r --arg vhost "$VHOST" '.Rules[] | select(.Conditions[].HostHeaderConfig.Values[] == $VHOST) | .Priority')
if [ -n "${priority}" ]; then
echo -n "${priority}" > /priority
exit 0
fi
fi
# Pick a random priority in 1-50000; retry up to 10 times if the candidates are taken
priority=0
for i in {1..10}
do
p=$(shuf -i 1-50000 -n 1)
# Skip the candidate if an existing rule already uses it
aws elbv2 describe-rules \
--listener-arn "$LISTENER_ARN" \
| jq -e "select(.Rules[].Priority == \"${p}\") | true" && continue
priority="${p}"
break
done
if [ "${priority}" -lt 1 ]; then
echo "Error: cannot determine a Rule priority"
exit 1
fi
echo -n "${priority}" > /priority
"""#,
]
env: {
LISTENER_ARN: listenerArn
if vhost != _|_ {
VHOST: vhost
}
}
},
op.#Export & {
source: "/db_created"
format: "string"
},
]
}
}
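
Usage sketch for #RandomRulePriority (illustrative; the listener ARN and vhost are placeholders):

package example

import (
    "alpha.dagger.io/aws"
    "alpha.dagger.io/aws/elb"
)

rulePriority: elb.#RandomRulePriority & {
    config:      aws.#Config & {region: "us-east-1"} // placeholder
    listenerArn: "sample-listener-arn"               // placeholder
    vhost:       "app.example.com"                   // optional: reuses a priority already allocated to this vhost
}

// rulePriority.priority resolves to the allocated priority (as a string)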

@@ -0,0 +1,249 @@
// AWS Relational Database Service (RDS)
package rds
import (
"alpha.dagger.io/dagger/op"
"encoding/json"
"alpha.dagger.io/aws"
)
// Creates a new Database on an existing RDS Instance
#Database: {
// AWS Config
config: aws.#Config
// DB name
name: string @dagger(input)
// ARN of the database instance
dbArn: string @dagger(input)
// ARN of the database secret (for connecting via rds api)
secretArn: string @dagger(input)
// Database type MySQL or PostgreSQL (Aurora Serverless only)
dbType: "mysql" | "postgres" @dagger(input)
// Name of the DB created
out: {
@dagger(output)
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
#"""
echo "dbType: $DB_TYPE"
sql="CREATE DATABASE \`"$NAME" \`"
if [ "$DB_TYPE" = postgres ]; then
sql="CREATE DATABASE \""$NAME"\""
fi
echo "$NAME" >> /db_created
aws rds-data execute-statement \
--resource-arn "$DB_ARN" \
--secret-arn "$SECRET_ARN" \
--sql "$sql" \
--database "$DB_TYPE" \
--no-include-result-metadata \
|& tee /tmp/out
exit_code=${PIPESTATUS[0]}
if [ $exit_code -ne 0 ]; then
grep -q "database exists\|already exists" /tmp/out || exit $exit_code
fi
"""#,
]
env: {
NAME: name
DB_ARN: dbArn
SECRET_ARN: secretArn
DB_TYPE: dbType
}
},
op.#Export & {
source: "/db_created"
format: "string"
},
]
}
}
// Creates a new user credentials on an existing RDS Instance
#User: {
// AWS Config
config: aws.#Config
// Username
username: string @dagger(input)
// Password
password: string @dagger(input)
// ARN of the database instance
dbArn: string @dagger(input)
// ARN of the database secret (for connecting via rds api)
secretArn: string @dagger(input)
// Name of the database to grant access to
grantDatabase: string | *"" @dagger(input)
// Database type MySQL or PostgreSQL (Aurora Serverless only)
dbType: "mysql" | "postgres" @dagger(input)
// Output username
out: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
#"""
echo "dbType: $DB_TYPE"
sql="CREATE USER '"$USERNAME"'@'%' IDENTIFIED BY '"$PASSWORD"'"
if [ "$DB_TYPE" = postgres ]; then
sql="CREATE USER \""$USERNAME"\" WITH PASSWORD '"$PASSWORD"'"
fi
echo "$USERNAME" >> /username
aws rds-data execute-statement \
--resource-arn "$DB_ARN" \
--secret-arn "$SECRET_ARN" \
--sql "$sql" \
--database "$DB_TYPE" \
--no-include-result-metadata \
|& tee tmp/out
exit_code=${PIPESTATUS[0]}
if [ $exit_code -ne 0 ]; then
grep -q "Operation CREATE USER failed for\|ERROR" tmp/out || exit $exit_code
fi
sql="SET PASSWORD FOR '"$USERNAME"'@'%' = PASSWORD('"$PASSWORD"')"
if [ "$DB_TYPE" = postgres ]; then
sql="ALTER ROLE \""$USERNAME"\" WITH PASSWORD '"$PASSWORD"'"
fi
aws rds-data execute-statement \
--resource-arn "$DB_ARN" \
--secret-arn "$SECRET_ARN" \
--sql "$sql" \
--database "$DB_TYPE" \
--no-include-result-metadata
sql="GRANT ALL ON \`"$GRAND_DATABASE"\`.* to '"$USERNAME"'@'%'"
if [ "$DB_TYPE" = postgres ]; then
sql="GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \""$USERNAME"\"; GRANT ALL PRIVILEGES ON DATABASE \""$GRAND_DATABASE"\" to \""$USERNAME"\"; GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO \""$USERNAME"\"; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON TABLES TO \""$USERNAME"\"; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON SEQUENCES TO \""$USERNAME"\"; GRANT USAGE ON SCHEMA public TO \""$USERNAME"\";"
fi
if [ -s "$GRAND_DATABASE ]; then
aws rds-data execute-statement \
--resource-arn "$DB_ARN" \
--secret-arn "$SECRET_ARN" \
--sql "$sql" \
--database "$DB_TYPE" \
--no-include-result-metadata
fi
"""#,
]
env: {
USERNAME: username
PASSWORD: password
DB_ARN: dbArn
SECRET_ARN: secretArn
GRANT_DATABASE: grantDatabase
DB_TYPE: dbType
}
},
op.#Export & {
source: "/username"
format: "string"
},
]
} @dagger(output)
}
// Fetches information on an existing RDS Instance
#Instance: {
// AWS Config
config: aws.#Config
// ARN of the database instance
dbArn: string @dagger(input)
// DB hostname
hostname: info.hostname @dagger(output)
// DB port
port: info.port @dagger(output)
info: {
hostname: string
port: int
}
info: json.Unmarshal(out) @dagger(output)
out: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
#"""
data=$(aws rds describe-db-clusters --filters "Name=db-cluster-id,Values=$DB_ARN")
echo "$data" | jq -r '.DBClusters[].Endpoint' > /tmp/out
echo "$data" | jq -r '.DBClusters[].Port' >> /tmp/out
cat /tmp/out | jq -sR 'split("\n") | {hostname: .[0], port: (.[1] | tonumber)}' > /out
"""#,
]
env: DB_ARN: dbArn
},
op.#Export & {
source: "/out"
format: "json"
},
]
}
}
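
Usage sketch combining the three definitions (illustrative; every ARN, name, and password below is a placeholder, and a real plan should inject the password as a secret input):

package example

import (
    "alpha.dagger.io/aws"
    "alpha.dagger.io/aws/rds"
)

cfg: aws.#Config & {region: "us-east-1"} // placeholder

db: rds.#Database & {
    config:    cfg
    name:      "app"                // placeholder
    dbArn:     "sample-cluster-arn" // placeholder
    secretArn: "sample-secret-arn"  // placeholder
    dbType:    "mysql"
}

user: rds.#User & {
    config:        cfg
    username:      "app"
    password:      "change-me" // placeholder
    dbArn:         db.dbArn
    secretArn:     db.secretArn
    grantDatabase: db.out
    dbType:        "mysql"
}

instance: rds.#Instance & {
    config: cfg
    dbArn:  db.dbArn
}
// instance.hostname and instance.port are parsed from describe-db-clusters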

@@ -0,0 +1,98 @@
// AWS Simple Storage Service
package s3
import (
"alpha.dagger.io/dagger"
"alpha.dagger.io/dagger/op"
"alpha.dagger.io/aws"
)
// S3 Bucket object(s) sync
#Object: {
// AWS Config
config: aws.#Config
// Source Artifact to upload to S3
source: dagger.#Artifact @dagger(input)
// Target S3 URL (e.g. s3://<bucket-name>/<path>/<sub-path>)
target: string @dagger(input)
// Delete remote files that no longer exist in the source
delete: *false | true @dagger(input)
// Object content type
contentType: string | *"" @dagger(input)
// Always write the object to S3
always: *true | false @dagger(input)
// Upload method
uploadMethod: *"cp" | "sync"
// URL of the uploaded S3 object
url: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
if always {
always: true
}
env: {
TARGET: target
OPT_CONTENT_TYPE: contentType
if delete {
OPT_DELETE: "1"
}
UPLOAD_METHOD: uploadMethod
}
mount: "/source": from: source
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"-c",
#"""
opts=()
case "$UPLOAD_METHOD" in
sync)
[ -n "$OPT_DELETE" ] && opts+=("--delete")
opts+=("--exact-timestamps")
;;
cp)
opts+=("--recursive")
;;
*)
echo "unsupported upload method: $UPLOAD_METHOD" >&2
exit 1
;;
esac
[ -n "$OPT_CONTENT_TYPE" ] && opts+=("--content-type" "$OPT_CONTENT_TYPE")
aws s3 "$UPLOAD_METHOD" "${opts[@]}" /source "$TARGET"
echo -n "$TARGET" \
| sed -E 's=^s3://([^/]*)/=https://\1.s3.amazonaws.com/=' \
> /url
"""#,
]
},
op.#Export & {
source: "/url"
format: "string"
},
]
} @dagger(output)
}

@@ -0,0 +1,46 @@
package s3
import (
"alpha.dagger.io/dagger"
"alpha.dagger.io/aws"
"alpha.dagger.io/random"
)
TestConfig: awsConfig: aws.#Config & {
region: "us-east-2"
}
bucket: "dagger-ci"
content: "A simple test sentence"
TestDirectory: dagger.#Artifact
TestS3Object: {
suffix: random.#String & {
seed: "s3"
}
target: "s3://\(bucket)/\(suffix.out)/"
deploy: #Object & {
always: true
config: TestConfig.awsConfig
source: TestDirectory
"target": target
}
verifyFile: #VerifyS3 & {
config: TestConfig.awsConfig
target: deploy.target
url: deploy.url
file: "dirFile.txt"
}
verifyDir: #VerifyS3 & {
config: TestConfig.awsConfig
target: deploy.target
url: deploy.url
file: "foo.txt"
}
}

@@ -0,0 +1 @@
Test recursivity

@@ -0,0 +1 @@
Test directory

@@ -0,0 +1,89 @@
package s3
import (
"alpha.dagger.io/aws"
"alpha.dagger.io/alpine"
"alpha.dagger.io/dagger/op"
)
#List: {
// AWS Config
config: aws.#Config
// Target S3 URL (e.g. s3://<bucket-name>/<path>/<sub-path>)
target: string
// Dummy URL, used only to force a dependency on the upload step
url: string
contents: {
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"-c",
#"""
aws s3 ls --recursive "\#(target)" > /contents
"""#,
]
env: URL: url
},
op.#Export & {
source: "/contents"
format: "string"
},
]
}
}
#VerifyS3: {
file: string
config: aws.#Config
target: string
url: string
lists: #List & {
"config": config
"target": target
"url": url
}
test: #up: [
op.#Load & {
from: alpine.#Image & {
package: bash: true
}
},
op.#WriteFile & {
dest: "/test"
content: lists.contents
},
op.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"-c",
"grep -q \(file) /test",
]
},
]
}