Mirror of https://github.com/game-ci/unity-builder.git (synced 2025-07-04 12:25:19 -04:00)
Implement AWS Fargate support [Large build support] (#246)
* Implement AWS Fargate support
* Update aws-tests workflow to include aws-ts-clean
* support remoteBuildCpu and remoteBuildContainer parameters for aws
* Syntax fix
* remove package-lock add yarn.lock
* yarn lock
* if: github.event.pull_request.draft == false

Co-authored-by: mdugdale <mark.dugdale@bossastudios.com>
This commit is contained in:
parent 398eda622f
commit 501c67e40c
60  .github/workflows/aws-tests.yml  vendored  Normal file
@@ -0,0 +1,60 @@
name: AWS

on:
  push: { branches: [aws, aws-ts-clean] }

env:
  AWS_REGION: "eu-west-1"

jobs:
  buildForAllPlatforms:
    name: AWS Fargate Build
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        projectPath:
          - test-project
        unityVersion:
          # - 2019.2.11f1
          - 2019.3.15f1
        targetPlatform:
          #- StandaloneOSX # Build a macOS standalone (Intel 64-bit).
          #- StandaloneWindows64 # Build a Windows 64-bit standalone.
          - StandaloneLinux64 # Build a Linux 64-bit standalone.
          #- iOS # Build an iOS player.
          #- Android # Build an Android .apk.
          #- WebGL # WebGL.
          # - StandaloneWindows # Build a Windows standalone.
          # - WSAPlayer # Build an Windows Store Apps player.
          # - PS4 # Build a PS4 Standalone.
          # - XboxOne # Build a Xbox One Standalone.
          # - tvOS # Build to Apple's tvOS platform.
          # - Switch # Build a Nintendo Switch player
    # steps
    steps:
      - name: Checkout (default)
        uses: actions/checkout@v2
        if: github.event.event_type != 'pull_request_target'
        with:
          lfs: true
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: eu-west-2
      - uses: ./
        id: aws-fargate-unity-build
        env:
          UNITY_LICENSE: ${{ secrets.UNITY_LICENSE }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: eu-west-2
        with:
          remoteBuildCluster: aws
          projectPath: ${{ matrix.projectPath }}
          unityVersion: ${{ matrix.unityVersion }}
          targetPlatform: ${{ matrix.targetPlatform }}
          githubToken: ${{ secrets.GITHUB_TOKEN }}
18  action.yml
@@ -30,6 +30,14 @@ inputs:
     required: false
     default: ''
     description: 'Path to a Namespace.Class.StaticMethod to run to perform the build.'
+  remoteBuildCluster:
+    default: 'local'
+    required: false
+    description: 'Either local, k8s or aws can be used to run builds on a remote cluster. Additional parameters must be configured.'
+  awsStackName:
+    default: 'game-ci'
+    required: false
+    description: 'The Cloud Formation stack name that must be setup before using this option.'
   kubeConfig:
     default: ''
     required: false
@@ -38,18 +46,18 @@ inputs:
     default: ''
     required: false
     description: 'Supply a Persistent Volume Claim name to use for the Unity build.'
-  kubeContainerMemory:
+  remoteBuildMemory:
     default: '800M'
     required: false
-    description: 'Amount of memory to assign the build container in Kubernetes (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes)'
+    description: 'Amount of memory to assign the remote build container'
-  kubeContainerCPU:
+  remoteBuildCpu:
     default: '0.25'
     required: false
-    description: 'Amount of CPU time to assign the build container in Kubernetes (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes)'
+    description: 'Amount of CPU time to assign the remote build container'
   kubeVolumeSize:
     default: '5Gi'
     required: false
-    description: 'Amount of disc space to assign the Kubernetes Persistent Volume (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes)'
+    description: 'Amount of disc space to assign the Kubernetes Persistent Volume'
   githubToken:
     default: ''
     required: false
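A minimal usage sketch (not part of the diff itself) of the inputs introduced above, based on the defaults defined in this commit's action.yml and on the aws-tests.yml workflow the commit adds; values are illustrative:

  - uses: ./
    env:
      UNITY_LICENSE: ${{ secrets.UNITY_LICENSE }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      AWS_DEFAULT_REGION: eu-west-2
    with:
      remoteBuildCluster: aws      # 'local' (default), 'k8s' or 'aws'
      awsStackName: game-ci        # base CloudFormation stack that must already be deployed
      remoteBuildMemory: '800M'    # defaults taken from action.yml above
      remoteBuildCpu: '0.25'
      projectPath: test-project
      targetPlatform: StandaloneLinux64
      githubToken: ${{ secrets.GITHUB_TOKEN }}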
423  dist/cloud-formations/base-setup.yml  vendored  Normal file
@@ -0,0 +1,423 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: AWS Fargate cluster that can span public and private subnets. Supports
  public facing load balancers, private internal load balancers, and
  both internal and external service discovery namespaces.
Parameters:
  EnvironmentName:
    Type: String
    Default: development
    Description: "Your deployment environment: DEV, QA , PROD"

  # ContainerPort:
  #   Type: Number
  #   Default: 80
  #   Description: What port number the application inside the docker container is binding to

Mappings:
  # Hard values for the subnet masks. These masks define
  # the range of internal IP addresses that can be assigned.
  # The VPC can have all IP's from 10.0.0.0 to 10.0.255.255
  # There are four subnets which cover the ranges:
  #
  # 10.0.0.0 - 10.0.0.255
  # 10.0.1.0 - 10.0.1.255
  # 10.0.2.0 - 10.0.2.255
  # 10.0.3.0 - 10.0.3.255

  SubnetConfig:
    VPC:
      CIDR: '10.0.0.0/16'
    PublicOne:
      CIDR: '10.0.0.0/24'
    PublicTwo:
      CIDR: '10.0.1.0/24'

Resources:

  # VPC in which containers will be networked.
  # It has two public subnets, and two private subnets.
  # We distribute the subnets across the first two available subnets
  # for the region, for high availability.
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      EnableDnsSupport: true
      EnableDnsHostnames: true
      CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR']

  EFSServerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupName: "efs-server-endpoints"
      GroupDescription: Which client ip addrs are allowed to access EFS server
      VpcId: !Ref 'VPC'
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 2049
          ToPort: 2049
          SourceSecurityGroupId: !Ref ContainerSecurityGroup
          #CidrIp: !FindInMap ['SubnetConfig', 'VPC', 'CIDR']

  # A security group for the containers we will run in Fargate.
  # Rules are added to this security group based on what ingress you
  # add for the cluster.
  ContainerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupName: "task security group"
      GroupDescription: Access to the Fargate containers
      VpcId: !Ref 'VPC'
      # SecurityGroupIngress:
      #   - IpProtocol: tcp
      #     FromPort: !Ref ContainerPort
      #     ToPort: !Ref ContainerPort
      #     CidrIp: 0.0.0.0/0
      SecurityGroupEgress:
        - IpProtocol: -1
          FromPort: 2049
          ToPort: 2049
          CidrIp: "0.0.0.0/0"

  # Two public subnets, where containers can have public IP addresses
  PublicSubnetOne:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: !Select
        - 0
        - Fn::GetAZs: !Ref 'AWS::Region'
      VpcId: !Ref 'VPC'
      CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR']
      # MapPublicIpOnLaunch: true

  PublicSubnetTwo:
    Type: AWS::EC2::Subnet
    Properties:
      AvailabilityZone: !Select
        - 1
        - Fn::GetAZs: !Ref 'AWS::Region'
      VpcId: !Ref 'VPC'
      CidrBlock: !FindInMap ['SubnetConfig', 'PublicTwo', 'CIDR']
      # MapPublicIpOnLaunch: true

  # Setup networking resources for the public subnets. Containers
  # in the public subnets have public IP addresses and the routing table
  # sends network traffic via the internet gateway.
  InternetGateway:
    Type: AWS::EC2::InternetGateway
  GatewayAttachement:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      VpcId: !Ref 'VPC'
      InternetGatewayId: !Ref 'InternetGateway'

  # Attaching a Internet Gateway to route table makes it public.
  PublicRouteTable:
    Type: AWS::EC2::RouteTable
    Properties:
      VpcId: !Ref 'VPC'
  PublicRoute:
    Type: AWS::EC2::Route
    DependsOn: GatewayAttachement
    Properties:
      RouteTableId: !Ref 'PublicRouteTable'
      DestinationCidrBlock: '0.0.0.0/0'
      GatewayId: !Ref 'InternetGateway'

  # Attaching a public route table makes a subnet public.
  PublicSubnetOneRouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref PublicSubnetOne
      RouteTableId: !Ref PublicRouteTable
  PublicSubnetTwoRouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref PublicSubnetTwo
      RouteTableId: !Ref PublicRouteTable

  # ECS Resources
  ECSCluster:
    Type: AWS::ECS::Cluster

  # A role used to allow AWS Autoscaling to inspect stats and adjust scaleable targets
  # on your AWS account
  AutoscalingRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Principal:
              Service: [application-autoscaling.amazonaws.com]
            Action: ['sts:AssumeRole']
      Path: /
      Policies:
        - PolicyName: service-autoscaling
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action:
                  - 'application-autoscaling:*'
                  - 'cloudwatch:DescribeAlarms'
                  - 'cloudwatch:PutMetricAlarm'
                  - 'ecs:DescribeServices'
                  - 'ecs:UpdateService'
                Resource: '*'

  # This is an IAM role which authorizes ECS to manage resources on your
  # account on your behalf, such as updating your load balancer with the
  # details of where your containers are, so that traffic can reach your
  # containers.
  ECSRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Principal:
              Service: [ecs.amazonaws.com]
            Action: ['sts:AssumeRole']
      Path: /
      Policies:
        - PolicyName: ecs-service
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action:
                  # Rules which allow ECS to attach network interfaces to instances
                  # on your behalf in order for awsvpc networking mode to work right
                  - 'ec2:AttachNetworkInterface'
                  - 'ec2:CreateNetworkInterface'
                  - 'ec2:CreateNetworkInterfacePermission'
                  - 'ec2:DeleteNetworkInterface'
                  - 'ec2:DeleteNetworkInterfacePermission'
                  - 'ec2:Describe*'
                  - 'ec2:DetachNetworkInterface'

                  # Rules which allow ECS to update load balancers on your behalf
                  # with the information sabout how to send traffic to your containers
                  - 'elasticloadbalancing:DeregisterInstancesFromLoadBalancer'
                  - 'elasticloadbalancing:DeregisterTargets'
                  - 'elasticloadbalancing:Describe*'
                  - 'elasticloadbalancing:RegisterInstancesWithLoadBalancer'
                  - 'elasticloadbalancing:RegisterTargets'
                Resource: '*'

  # This is a role which is used by the ECS tasks themselves.
  ECSTaskExecutionRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Principal:
              Service: [ecs-tasks.amazonaws.com]
            Action: ['sts:AssumeRole']
      Path: /
      Policies:
        - PolicyName: AmazonECSTaskExecutionRolePolicy
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action:
                  # Allow upload to S3
                  - 's3:GetObject'
                  - 's3:GetObjectVersion'
                  - 's3:PutObject'

                  # Allow the use of secret manager
                  - 'secretsmanager:GetSecretValue'
                  - 'kms:Decrypt'

                  # Allow the ECS Tasks to download images from ECR
                  - 'ecr:GetAuthorizationToken'
                  - 'ecr:BatchCheckLayerAvailability'
                  - 'ecr:GetDownloadUrlForLayer'
                  - 'ecr:BatchGetImage'

                  # Allow the ECS tasks to upload logs to CloudWatch
                  - 'logs:CreateLogStream'
                  - 'logs:PutLogEvents'
                Resource: '*'

  DeleteCFNLambdaExecutionRole:
    Type: "AWS::IAM::Role"
    Properties:
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: "Allow"
            Principal:
              Service: ["lambda.amazonaws.com"]
            Action: "sts:AssumeRole"
      Path: "/"
      Policies:
        - PolicyName: DeleteCFNLambdaExecutionRole
          PolicyDocument:
            Version: "2012-10-17"
            Statement:
              - Effect: "Allow"
                Action:
                  - "logs:CreateLogGroup"
                  - "logs:CreateLogStream"
                  - "logs:PutLogEvents"
                Resource: "arn:aws:logs:*:*:*"
              - Effect: "Allow"
                Action:
                  - "cloudformation:DeleteStack"
                  - "kinesis:DeleteStream"
                  - "secretsmanager:DeleteSecret"
                  - "kinesis:DescribeStreamSummary"
                  - "logs:DeleteLogGroup"
                  - "logs:DeleteSubscriptionFilter"
                  - "ecs:DeregisterTaskDefinition"
                  - "lambda:DeleteFunction"
                  - "lambda:InvokeFunction"
                  - "events:RemoveTargets"
                  - "events:DeleteRule"
                  - "lambda:RemovePermission"
                Resource: "*"

  ### cloud watch to kinesis role

  CloudWatchIAMRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Principal:
              Service: [logs.amazonaws.com]
            Action: ['sts:AssumeRole']
      Path: /
      Policies:
        - PolicyName: service-autoscaling
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action:
                  - 'kinesis:PutRecord'
                Resource: '*'

  #####################EFS#####################

  EfsFileStorage:
    Type: 'AWS::EFS::FileSystem'
    Properties:
      BackupPolicy:
        Status: ENABLED
      PerformanceMode: maxIO
      Encrypted: false

      FileSystemPolicy:
        Version: "2012-10-17"
        Statement:
          - Effect: "Allow"
            Action:
              - "elasticfilesystem:ClientMount"
              - "elasticfilesystem:ClientWrite"
              - "elasticfilesystem:ClientRootAccess"
            Principal:
              AWS: "*"

  MountTargetResource1:
    Type: AWS::EFS::MountTarget
    Properties:
      FileSystemId: !Ref EfsFileStorage
      SubnetId: !Ref PublicSubnetOne
      SecurityGroups:
        - !Ref EFSServerSecurityGroup

  MountTargetResource2:
    Type: AWS::EFS::MountTarget
    Properties:
      FileSystemId: !Ref EfsFileStorage
      SubnetId: !Ref PublicSubnetTwo
      SecurityGroups:
        - !Ref EFSServerSecurityGroup

  S3Bucket:
    Type: 'AWS::S3::Bucket'
    DeletionPolicy: Retain
    Properties:
      BucketName: game-ci-storage

Outputs:

  EfsFileStorageId:
    Description: 'The connection endpoint for the database.'
    Value: !Ref EfsFileStorage
    Export:
      Name: !Sub ${EnvironmentName}:EfsFileStorageId
  ClusterName:
    Description: The name of the ECS cluster
    Value: !Ref 'ECSCluster'
    Export:
      Name: !Sub ${EnvironmentName}:ClusterName
  AutoscalingRole:
    Description: The ARN of the role used for autoscaling
    Value: !GetAtt 'AutoscalingRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:AutoscalingRole
  ECSRole:
    Description: The ARN of the ECS role
    Value: !GetAtt 'ECSRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:ECSRole
  ECSTaskExecutionRole:
    Description: The ARN of the ECS role tsk execution role
    Value: !GetAtt 'ECSTaskExecutionRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:ECSTaskExecutionRole

  DeleteCFNLambdaExecutionRole:
    Description: Lambda execution role for cleaning up cloud formations
    Value: !GetAtt 'DeleteCFNLambdaExecutionRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:DeleteCFNLambdaExecutionRole

  CloudWatchIAMRole:
    Description: The ARN of the CloudWatch role for subscription filter
    Value: !GetAtt 'CloudWatchIAMRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:CloudWatchIAMRole
  VpcId:
    Description: The ID of the VPC that this stack is deployed in
    Value: !Ref 'VPC'
    Export:
      Name: !Sub ${EnvironmentName}:VpcId
  PublicSubnetOne:
    Description: Public subnet one
    Value: !Ref 'PublicSubnetOne'
    Export:
      Name: !Sub ${EnvironmentName}:PublicSubnetOne
  PublicSubnetTwo:
    Description: Public subnet two
    Value: !Ref 'PublicSubnetTwo'
    Export:
      Name: !Sub ${EnvironmentName}:PublicSubnetTwo

  ContainerSecurityGroup:
    Description: A security group used to allow Fargate containers to receive traffic
    Value: !Ref 'ContainerSecurityGroup'
    Export:
      Name: !Sub ${EnvironmentName}:ContainerSecurityGroup
143  dist/cloud-formations/cloudformation-stack-ttl.yml  vendored  Normal file
@@ -0,0 +1,143 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: Schedule automatic deletion of CloudFormation stacks
Metadata:
  AWS::CloudFormation::Interface:
    ParameterGroups:
      - Label:
          default: Input configuration
        Parameters:
          - StackName
          - TTL
    ParameterLabels:
      StackName:
        default: Stack name
      TTL:
        default: Time-to-live
Parameters:
  EnvironmentName:
    Type: String
    Default: development
    Description: 'Your deployment environment: DEV, QA , PROD'
  BUILDID:
    Type: String
    Default: ''
  StackName:
    Type: String
    Description: Stack name that will be deleted.
  DeleteStackName:
    Type: String
    Description: Stack name that will be deleted.
  TTL:
    Type: Number
    Description: Time-to-live in minutes for the stack.
Resources:
  DeleteCFNLambda:
    Type: "AWS::Lambda::Function"
    Properties:
      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDID ] ]
      Code:
        ZipFile: |
          import boto3
          import os
          import json

          stack_name = os.environ['stackName']
          delete_stack_name = os.environ['deleteStackName']

          def delete_cfn(stack_name):
            try:
              cfn = boto3.resource('cloudformation')
              stack = cfn.Stack(stack_name)
              stack.delete()
              return "SUCCESS"
            except:
              return "ERROR"

          def handler(event, context):
            print("Received event:")
            print(json.dumps(event))
            result = delete_cfn(stack_name)
            delete_cfn(delete_stack_name)
            return result
      Environment:
        Variables:
          stackName: !Ref 'StackName'
          deleteStackName: !Ref 'DeleteStackName'
      Handler: "index.handler"
      Runtime: "python3.6"
      Timeout: "5"
      Role:
        'Fn::ImportValue': !Sub '${EnvironmentName}:DeleteCFNLambdaExecutionRole'
  DeleteStackEventRule:
    DependsOn:
      - DeleteCFNLambda
      - GenerateCronExpression
    Type: "AWS::Events::Rule"
    Properties:
      Name: !Join [ "", [ 'DeleteStackEventRule', !Ref BUILDID ] ]
      Description: Delete stack event
      ScheduleExpression: !GetAtt GenerateCronExpression.cron_exp
      State: "ENABLED"
      Targets:
        -
          Arn: !GetAtt DeleteCFNLambda.Arn
          Id: 'DeleteCFNLambda'
  PermissionForDeleteCFNLambda:
    Type: "AWS::Lambda::Permission"
    DependsOn:
      - DeleteStackEventRule
    Properties:
      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDID ] ]
      Action: "lambda:InvokeFunction"
      Principal: "events.amazonaws.com"
      SourceArn: !GetAtt DeleteStackEventRule.Arn
  GenerateCronExpLambda:
    Type: "AWS::Lambda::Function"
    Properties:
      FunctionName: !Join [ "", [ 'GenerateCronExpressionLambda', !Ref BUILDID ] ]
      Code:
        ZipFile: |
          from datetime import datetime, timedelta
          import os
          import logging
          import json
          import cfnresponse

          def deletion_time(ttl):
            delete_at_time = datetime.now() + timedelta(minutes=int(ttl))
            hh = delete_at_time.hour
            mm = delete_at_time.minute
            yyyy = delete_at_time.year
            month = delete_at_time.month
            dd = delete_at_time.day
            # minutes hours day month day-of-week year
            cron_exp = "cron({} {} {} {} ? {})".format(mm, hh, dd, month, yyyy)
            return cron_exp

          def handler(event, context):
            print('Received event: %s' % json.dumps(event))
            status = cfnresponse.SUCCESS
            try:
              if event['RequestType'] == 'Delete':
                cfnresponse.send(event, context, status, {})
              else:
                ttl = event['ResourceProperties']['ttl']
                responseData = {}
                responseData['cron_exp'] = deletion_time(ttl)
                cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData)
            except Exception as e:
              logging.error('Exception: %s' % e, exc_info=True)
              status = cfnresponse.FAILED
              cfnresponse.send(event, context, status, {}, None)
      Handler: "index.handler"
      Runtime: "python3.6"
      Timeout: "5"
      Role:
        'Fn::ImportValue': !Sub '${EnvironmentName}:DeleteCFNLambdaExecutionRole'
  GenerateCronExpression:
    Type: "Custom::GenerateCronExpression"
    Version: "1.0"
    Properties:
      Name: !Join [ "", [ 'GenerateCronExpression', !Ref BUILDID ] ]
      ServiceToken: !GetAtt GenerateCronExpLambda.Arn
      ttl: !Ref 'TTL'
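Illustrative note (not part of the diff): the GenerateCronExpression custom resource above converts the TTL in minutes into the one-shot schedule consumed by DeleteStackEventRule. Assuming, for example, the stack were created at 2021-03-01 10:20 UTC with TTL 100, deletion_time would yield:

  # 2021-03-01 10:20 UTC + 100 minutes = 2021-03-01 12:00 UTC, formatted as cron(minutes hours day month ? year)
  ScheduleExpression: "cron(0 12 1 3 ? 2021)"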
322  dist/cloud-formations/task-def-formation.yml  vendored  Normal file
@@ -0,0 +1,322 @@
AWSTemplateFormatVersion: 2010-09-09
Description: >-
  AWS Fargate cluster that can span public and private subnets. Supports public
  facing load balancers, private internal load balancers, and both internal and
  external service discovery namespaces.
Parameters:
  EnvironmentName:
    Type: String
    Default: development
    Description: 'Your deployment environment: DEV, QA , PROD'
  ServiceName:
    Type: String
    Default: example
    Description: A name for the service
  ImageUrl:
    Type: String
    Default: nginx
    Description: >-
      The url of a docker image that contains the application process that will
      handle the traffic for this service
  ContainerPort:
    Type: Number
    Default: 80
    Description: What port number the application inside the docker container is binding to
  ContainerCpu:
    Type: Number
    Default: 1024
    Description: How much CPU to give the container. 1024 is 1 CPU
  ContainerMemory:
    Type: Number
    Default: 2048
    Description: How much memory in megabytes to give the container
  BUILDID:
    Type: String
    Default: ''
  Command:
    Type: String
    Default: 'ls'
  EntryPoint:
    Type: String
    Default: '/bin/sh'
  WorkingDirectory:
    Type: String
    Default: '/efsdata/'
  Role:
    Type: String
    Default: ''
    Description: >-
      (Optional) An IAM role to give the service's containers if the code within
      needs to access other AWS resources like S3 buckets, DynamoDB tables, etc
  EFSMountDirectory:
    Type: String
    Default: '/efsdata'
  GithubToken:
    Type: String
    Default: '0'
  UnityLicense:
    Type: String
    Default: '0'
  UnityEmail:
    Type: String
    Default: '0'
  UnityPassword:
    Type: String
    Default: '0'
  UnitySerial:
    Type: String
    Default: '0'
  AndroidKeystoreBase64:
    Type: String
    Default: '0'
  AndroidKeystorePass:
    Type: String
    Default: '0'
  AndroidKeyAliasPass:
    Type: String
    Default: '0'
  AWSAccessKeyID:
    Type: String
    Default: '0'
  AWSSecretAccessKey:
    Type: String
    Default: '0'
Mappings:
  SubnetConfig:
    VPC:
      CIDR: 10.0.0.0/16
    PublicOne:
      CIDR: 10.0.0.0/24
    PublicTwo:
      CIDR: 10.0.1.0/24
Conditions:
  HasCustomRole: !Not
    - !Equals
      - Ref: Role
      - ''
Resources:
  LogGroup:
    Type: 'AWS::Logs::LogGroup'
    Properties:
      LogGroupName: !Ref ServiceName
    Metadata:
      'AWS::CloudFormation::Designer':
        id: aece53ae-b82d-4267-bc16-ed964b05db27
  SubscriptionFilter:
    Type: 'AWS::Logs::SubscriptionFilter'
    Properties:
      FilterPattern: ''
      RoleArn:
        'Fn::ImportValue': !Sub '${EnvironmentName}:CloudWatchIAMRole'
      LogGroupName: !Ref ServiceName
      DestinationArn:
        'Fn::GetAtt':
          - KinesisStream
          - Arn
    Metadata:
      'AWS::CloudFormation::Designer':
        id: 7f809e91-9e5d-4678-98c1-c5085956c480
    DependsOn:
      - LogGroup
      - KinesisStream
  KinesisStream:
    Type: 'AWS::Kinesis::Stream'
    Properties:
      Name: !Ref ServiceName
      ShardCount: 1
    Metadata:
      'AWS::CloudFormation::Designer':
        id: c6f18447-b879-4696-8873-f981b2cedd2b

  GithubTokenSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'GithubToken', !Ref BUILDID ] ]
      SecretString: !Ref GithubToken

  UnityLicenseSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'UnityLicense', !Ref BUILDID ] ]
      SecretString: !Ref UnityLicense

  UnityEmailSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'UnityEmail', !Ref BUILDID ] ]
      SecretString: !Ref UnityEmail

  UnityPasswordSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'UnityPassword', !Ref BUILDID ] ]
      SecretString: !Ref UnityPassword

  UnitySerialSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'UnitySerial', !Ref BUILDID ] ]
      SecretString: !Ref UnitySerial

  AndroidKeystoreBase64Secret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'AndroidKeystoreBase64', !Ref BUILDID ] ]
      SecretString: !Ref AndroidKeystoreBase64

  AndroidKeystorePassSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'AndroidKeystorePass', !Ref BUILDID ] ]
      SecretString: !Ref AndroidKeystorePass

  AndroidKeyAliasPassSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'AndroidKeyAliasPass', !Ref BUILDID ] ]
      SecretString: !Ref AndroidKeyAliasPass
  AWSAccessKeyIDSecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'AWSAccessKeyID', !Ref BUILDID ] ]
      SecretString: !Ref AWSAccessKeyID
  AWSSecretAccessKeySecret:
    Type: AWS::SecretsManager::Secret
    Properties:
      Name: !Join [ "", [ 'AWSSecretAccessKey', !Ref BUILDID ] ]
      SecretString: !Ref AWSSecretAccessKey

  TaskDefinition:
    Type: 'AWS::ECS::TaskDefinition'
    Properties:
      Family: !Ref ServiceName
      Cpu: !Ref ContainerCpu
      Memory: !Ref ContainerMemory
      NetworkMode: awsvpc
      Volumes:
        - Name: efs-data
          EFSVolumeConfiguration:
            FilesystemId:
              'Fn::ImportValue': !Sub '${EnvironmentName}:EfsFileStorageId'
            TransitEncryption: ENABLED
      RequiresCompatibilities:
        - FARGATE
      ExecutionRoleArn:
        'Fn::ImportValue': !Sub '${EnvironmentName}:ECSTaskExecutionRole'
      TaskRoleArn:
        'Fn::If':
          - HasCustomRole
          - !Ref Role
          - !Ref 'AWS::NoValue'
      ContainerDefinitions:
        - Name: !Ref ServiceName
          Cpu: !Ref ContainerCpu
          Memory: !Ref ContainerMemory
          Image: !Ref ImageUrl
          EntryPoint:
            Fn::Split:
              - ","
              - !Ref EntryPoint
          Command:
            Fn::Split:
              - ","
              - !Ref Command
          WorkingDirectory: !Ref WorkingDirectory
          Environment:
            - Name: ALLOW_EMPTY_PASSWORD
              Value: 'yes'
          MountPoints:
            - SourceVolume: efs-data
              ContainerPath: !Ref EFSMountDirectory
              ReadOnly: false
          Secrets:
            - Name: 'GITHUB_TOKEN'
              ValueFrom: !Ref GithubTokenSecret
            - Name: 'UNITY_LICENSE'
              ValueFrom: !Ref UnityLicenseSecret
            - Name: 'UNITY_EMAIL'
              ValueFrom: !Ref UnityEmailSecret
            - Name: 'UNITY_PASSWORD'
              ValueFrom: !Ref UnityPasswordSecret
            - Name: 'UNITY_SERIAL'
              ValueFrom: !Ref UnitySerialSecret
            - Name: 'ANDROID_KEYSTORE_BASE64'
              ValueFrom: !Ref AndroidKeystoreBase64Secret
            - Name: 'ANDROID_KEYSTORE_PASS'
              ValueFrom: !Ref AndroidKeystorePassSecret
            - Name: 'AWS_ACCESS_KEY_ID'
              ValueFrom: !Ref AWSAccessKeyIDSecret
            - Name: 'AWS_SECRET_ACCESS_KEY'
              ValueFrom: !Ref AWSSecretAccessKeySecret
          LogConfiguration:
            LogDriver: awslogs
            Options:
              awslogs-group: !Ref ServiceName
              awslogs-region: !Ref 'AWS::Region'
              awslogs-stream-prefix: !Ref ServiceName
    Metadata:
      'AWS::CloudFormation::Designer':
        id: dabb0116-abe0-48a6-a8af-cf9111c879a5
    DependsOn:
      - LogGroup
Metadata:
  'AWS::CloudFormation::Designer':
    dabb0116-abe0-48a6-a8af-cf9111c879a5:
      size:
        width: 60
        height: 60
      position:
        x: 270
        'y': 90
      z: 1
      embeds: []
      dependson:
        - aece53ae-b82d-4267-bc16-ed964b05db27
    c6f18447-b879-4696-8873-f981b2cedd2b:
      size:
        width: 60
        height: 60
      position:
        x: 270
        'y': 210
      z: 1
      embeds: []
    7f809e91-9e5d-4678-98c1-c5085956c480:
      size:
        width: 60
        height: 60
      position:
        x: 60
        'y': 300
      z: 1
      embeds: []
      dependson:
        - aece53ae-b82d-4267-bc16-ed964b05db27
        - c6f18447-b879-4696-8873-f981b2cedd2b
    aece53ae-b82d-4267-bc16-ed964b05db27:
      size:
        width: 150
        height: 150
      position:
        x: 60
        'y': 90
      z: 1
      embeds: []
    4d2da56c-3643-46b8-aaee-e46e19f95fcc:
      source:
        id: 7f809e91-9e5d-4678-98c1-c5085956c480
      target:
        id: aece53ae-b82d-4267-bc16-ed964b05db27
      z: 11
    14eb957b-f094-4653-93c4-77b2f851953c:
      source:
        id: 7f809e91-9e5d-4678-98c1-c5085956c480
      target:
        id: c6f18447-b879-4696-8873-f981b2cedd2b
      z: 12
    85c57444-e5bb-4230-bc85-e545cd4558f6:
      source:
        id: dabb0116-abe0-48a6-a8af-cf9111c879a5
      target:
        id: aece53ae-b82d-4267-bc16-ed964b05db27
      z: 13
BIN  dist/index.js  generated  vendored  Binary file not shown.
BIN  dist/index.js.map  generated  vendored  Binary file not shown.
BIN  dist/licenses.txt  generated  vendored  Binary file not shown.
package.json
@@ -17,8 +17,10 @@
     "@actions/core": "^1.2.6",
     "@actions/exec": "^1.0.4",
     "@actions/github": "^2.2.0",
+    "aws-sdk": "^2.812.0",
     "base-64": "^1.0.0",
     "kubernetes-client": "^9.0.0",
+    "nanoid": "3.1.20",
     "semver": "^7.3.2"
   },
   "devDependencies": {
29  src/index.ts
@@ -1,5 +1,5 @@
 import * as core from '@actions/core';
-import { Action, BuildParameters, Cache, Docker, ImageTag, Kubernetes, Output } from './model';
+import { Action, BuildParameters, Cache, Docker, ImageTag, Kubernetes, Output, AWS } from './model';

 async function run() {
   try {
@@ -10,14 +10,25 @@ async function run() {

     const buildParameters = await BuildParameters.create();
     const baseImage = new ImageTag(buildParameters);
-    if (buildParameters.kubeConfig) {
-      core.info('Building with Kubernetes');
-      await Kubernetes.runBuildJob(buildParameters, baseImage);
-    } else {
-      // Build docker image
-      // TODO: No image required (instead use a version published to dockerhub for the action, supply credentials for github cloning)
-      const builtImage = await Docker.build({ path: actionFolder, dockerfile, baseImage });
-      await Docker.run(builtImage, { workspace, ...buildParameters });
-    }
+    let builtImage;
+
+    switch (buildParameters.remoteBuildCluster) {
+      case 'k8s':
+        core.info('Building with Kubernetes');
+        await Kubernetes.runBuildJob(buildParameters, baseImage);
+        break;
+
+      case 'aws':
+        core.info('Building with AWS');
+        await AWS.runBuildJob(buildParameters, baseImage);
+        break;
+
+      // default and local case
+      default:
+        core.info('Building locally');
+        builtImage = await Docker.build({ path: actionFolder, dockerfile, baseImage });
+        await Docker.run(builtImage, { workspace, ...buildParameters });
+        break;
+    }

     // Set output
607  src/model/aws.ts  Normal file
@@ -0,0 +1,607 @@
import * as SDK from 'aws-sdk';
import { customAlphabet } from 'nanoid';
import * as fs from 'fs';
import * as core from '@actions/core';
import * as zlib from 'zlib';
const alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz';
const repositoryDirectoryName = 'repo';
const efsDirectoryName = 'data';
const cacheDirectoryName = 'cache';

class AWS {
  static async runBuildJob(buildParameters, baseImage) {
    try {
      const nanoid = customAlphabet(alphabet, 4);
      const buildUid = `${process.env.GITHUB_RUN_NUMBER}-${buildParameters.platform
        .replace('Standalone', '')
        .replace('standalone', '')}-${nanoid()}`;
      const branchName = process.env.GITHUB_REF?.split('/').reverse()[0];

      core.info('Starting part 1/4 (clone from github and restore cache)');
      await this.run(
        buildUid,
        buildParameters.awsStackName,
        'alpine/git',
        ['/bin/sh'],
        [
          '-c',
          `apk update;
          apk add unzip;
          apk add git-lfs;
          apk add jq;
          # Get source repo for project to be built and game-ci repo for utilties
          git clone https://${process.env.GITHUB_TOKEN}@github.com/${process.env.GITHUB_REPOSITORY}.git ${buildUid}/${repositoryDirectoryName} -q
          git clone https://${process.env.GITHUB_TOKEN}@github.com/game-ci/unity-builder.git ${buildUid}/builder -q
          cd /${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/
          git checkout $GITHUB_SHA
          cd /${efsDirectoryName}/
          # Look for usable cache
          if [ ! -d ${cacheDirectoryName} ]; then
            mkdir ${cacheDirectoryName}
          fi
          cd ${cacheDirectoryName}
          if [ ! -d "${branchName}" ]; then
            mkdir "${branchName}"
          fi
          cd "${branchName}"
          echo " "
          echo "Cached Libraries for ${branchName} from previous builds:"
          ls
          echo " "
          libDir="/${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/${buildParameters.projectPath}/Library"
          if [ -d "$libDir" ]; then
            rm -r "$libDir"
            echo "Setup .gitignore to ignore Library folder and remove it from builds"
          fi
          echo 'Checking cache'
          # Restore cache
          latest=$(ls -t | head -1)
          if [ ! -z "$latest" ]; then
            echo "Library cache exists from build $latest from ${branchName}"
            echo 'Creating empty Library folder for cache'
            mkdir "$libDir"
            unzip -q $latest -d '/${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/${buildParameters.projectPath}/Library/.'
          else
            echo 'Cache does not exist'
          fi
          # Print out important directories
          echo ' '
          echo 'Repo:'
          ls /${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/
          echo ' '
          echo 'Project:'
          ls /${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/${buildParameters.projectPath}
          echo ' '
          echo 'Library:'
          ls /${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/${buildParameters.projectPath}/Library/
          echo ' '
          `,
        ],
        `/${efsDirectoryName}`,
        `/${efsDirectoryName}/`,
        [
          {
            name: 'GITHUB_SHA',
            value: process.env.GITHUB_SHA,
          },
        ],
        [
          {
            ParameterKey: 'GithubToken',
            ParameterValue: buildParameters.githubToken,
          },
        ],
      );

      core.info('Starting part 2/4 (build unity project)');
      await this.run(
        buildUid,
        buildParameters.awsStackName,
        baseImage.toString(),
        ['/bin/sh'],
        [
          '-c',
          `
          cp -r /${efsDirectoryName}/${buildUid}/builder/dist/default-build-script/ /UnityBuilderAction;
          cp -r /${efsDirectoryName}/${buildUid}/builder/dist/entrypoint.sh /entrypoint.sh;
          cp -r /${efsDirectoryName}/${buildUid}/builder/dist/steps/ /steps;
          chmod -R +x /entrypoint.sh;
          chmod -R +x /steps;
          /entrypoint.sh;
          `,
        ],
        `/${efsDirectoryName}`,
        `/${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/`,
        [
          {
            name: 'ContainerMemory',
            value: buildParameters.remoteBuildMemory,
          },
          {
            name: 'ContainerCpu',
            value: buildParameters.remoteBuildCpu,
          },
          {
            name: 'GITHUB_WORKSPACE',
            value: `/${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/`,
          },
          {
            name: 'PROJECT_PATH',
            value: buildParameters.projectPath,
          },
          {
            name: 'BUILD_PATH',
            value: buildParameters.buildPath,
          },
          {
            name: 'BUILD_FILE',
            value: buildParameters.buildFile,
          },
          {
            name: 'BUILD_NAME',
            value: buildParameters.buildName,
          },
          {
            name: 'BUILD_METHOD',
            value: buildParameters.buildMethod,
          },
          {
            name: 'CUSTOM_PARAMETERS',
            value: buildParameters.customParameters,
          },
          {
            name: 'BUILD_TARGET',
            value: buildParameters.platform,
          },
          {
            name: 'ANDROID_VERSION_CODE',
            value: buildParameters.androidVersionCode.toString(),
          },
          {
            name: 'ANDROID_KEYSTORE_NAME',
            value: buildParameters.androidKeystoreName,
          },
          {
            name: 'ANDROID_KEYALIAS_NAME',
            value: buildParameters.androidKeyaliasName,
          },
        ],
        [
          {
            ParameterKey: 'GithubToken',
            ParameterValue: buildParameters.githubToken,
          },
          {
            ParameterKey: 'UnityLicense',
            ParameterValue: process.env.UNITY_LICENSE ? process.env.UNITY_LICENSE : '0',
          },
          {
            ParameterKey: 'UnityEmail',
            ParameterValue: process.env.UNITY_EMAIL ? process.env.UNITY_EMAIL : '0',
          },
          {
            ParameterKey: 'UnityPassword',
            ParameterValue: process.env.UNITY_PASSWORD ? process.env.UNITY_PASSWORD : '0',
          },
          {
            ParameterKey: 'UnitySerial',
            ParameterValue: process.env.UNITY_SERIAL ? process.env.UNITY_SERIAL : '0',
          },
          {
            ParameterKey: 'AndroidKeystoreBase64',
            ParameterValue: buildParameters.androidKeystoreBase64 ? buildParameters.androidKeystoreBase64 : '0',
          },
          {
            ParameterKey: 'AndroidKeystorePass',
            ParameterValue: buildParameters.androidKeystorePass ? buildParameters.androidKeystorePass : '0',
          },
          {
            ParameterKey: 'AndroidKeyAliasPass',
            ParameterValue: buildParameters.androidKeyaliasPass ? buildParameters.androidKeyaliasPass : '0',
          },
        ],
      );
      core.info('Starting part 3/4 (zip unity build and Library for caching)');
      // Cleanup
      await this.run(
        buildUid,
        buildParameters.awsStackName,
        'alpine',
        ['/bin/sh'],
        [
          '-c',
          `
          apk update
          apk add zip
          cd Library
          zip -q -r lib-${buildUid}.zip .*
          mv lib-${buildUid}.zip /${efsDirectoryName}/${cacheDirectoryName}/${branchName}/lib-${buildUid}.zip
          cd ../../
          zip -q -r build-${buildUid}.zip ${buildParameters.buildPath}/*
          mv build-${buildUid}.zip /${efsDirectoryName}/${buildUid}/build-${buildUid}.zip
          `,
        ],
        `/${efsDirectoryName}`,
        `/${efsDirectoryName}/${buildUid}/${repositoryDirectoryName}/${buildParameters.projectPath}`,
        [
          {
            name: 'GITHUB_SHA',
            value: process.env.GITHUB_SHA,
          },
        ],
        [
          {
            ParameterKey: 'GithubToken',
            ParameterValue: buildParameters.githubToken,
          },
        ],
      );

      core.info('Starting part 4/4 (upload build to s3)');
      await this.run(
        buildUid,
        buildParameters.awsStackName,
        'amazon/aws-cli',
        ['/bin/sh'],
        [
          '-c',
          `
          aws s3 cp ${buildUid}/build-${buildUid}.zip s3://game-ci-storage/
          # no need to upload Library cache for now
          # aws s3 cp /${efsDirectoryName}/${cacheDirectoryName}/${branchName}/lib-${buildUid}.zip s3://game-ci-storage/
          rm -r ${buildUid}
          `,
        ],
        `/${efsDirectoryName}`,
        `/${efsDirectoryName}/`,
        [
          {
            name: 'GITHUB_SHA',
            value: process.env.GITHUB_SHA,
          },
          {
            name: 'AWS_DEFAULT_REGION',
            value: process.env.AWS_DEFAULT_REGION,
          },
        ],
        [
          {
            ParameterKey: 'GithubToken',
            ParameterValue: buildParameters.githubToken,
          },
          {
            ParameterKey: 'AWSAccessKeyID',
            ParameterValue: process.env.AWS_ACCESS_KEY_ID,
          },
          {
            ParameterKey: 'AWSSecretAccessKey',
            ParameterValue: process.env.AWS_SECRET_ACCESS_KEY,
          },
        ],
      );
    } catch (error) {
      core.setFailed(error);
      core.error(error);
    }
  }

  static async run(
    buildUid: string,
    stackName: string,
    image: string,
    entrypoint: string[],
    commands,
    mountdir,
    workingdir,
    environment,
    secrets,
  ) {
    const ECS = new SDK.ECS();
    const CF = new SDK.CloudFormation();

    const taskDef = await this.setupCloudFormations(
      CF,
      buildUid,
      stackName,
      image,
      entrypoint,
      commands,
      mountdir,
      workingdir,
      secrets,
    );

    await this.runTask(taskDef, ECS, CF, environment, buildUid);

    await this.cleanupResources(CF, taskDef);
  }

  static async setupCloudFormations(
    CF,
    buildUid: string,
    stackName: string,
    image: string,
    entrypoint: string[],
    commands,
    mountdir,
    workingdir,
    secrets,
  ) {
    const logid = customAlphabet(alphabet, 9)();
    commands[1] += `
      echo "${logid}"
    `;
    const taskDefStackName = `${stackName}-${buildUid}`;
    const taskDefCloudFormation = fs.readFileSync(`${__dirname}/cloud-formations/task-def-formation.yml`, 'utf8');
    await CF.createStack({
      StackName: taskDefStackName,
      TemplateBody: taskDefCloudFormation,
      Parameters: [
        {
          ParameterKey: 'ImageUrl',
          ParameterValue: image,
        },
        {
          ParameterKey: 'ServiceName',
          ParameterValue: taskDefStackName,
        },
        {
          ParameterKey: 'Command',
          ParameterValue: commands.join(','),
        },
        {
          ParameterKey: 'EntryPoint',
          ParameterValue: entrypoint.join(','),
        },
        {
          ParameterKey: 'WorkingDirectory',
          ParameterValue: workingdir,
        },
        {
          ParameterKey: 'EFSMountDirectory',
          ParameterValue: mountdir,
        },
        {
          ParameterKey: 'BUILDID',
          ParameterValue: buildUid,
        },
        ...secrets,
      ],
    }).promise();
    core.info('Creating worker cluster...');

    const cleanupTaskDefStackName = `${taskDefStackName}-cleanup`;
    const cleanupCloudFormation = fs.readFileSync(`${__dirname}/cloud-formations/cloudformation-stack-ttl.yml`, 'utf8');
    await CF.createStack({
      StackName: cleanupTaskDefStackName,
      TemplateBody: cleanupCloudFormation,
      Capabilities: ['CAPABILITY_IAM'],
      Parameters: [
        {
          ParameterKey: 'StackName',
          ParameterValue: taskDefStackName,
        },
        {
          ParameterKey: 'DeleteStackName',
          ParameterValue: cleanupTaskDefStackName,
        },
        {
          ParameterKey: 'TTL',
          ParameterValue: '100',
        },
        {
          ParameterKey: 'BUILDID',
          ParameterValue: buildUid,
        },
      ],
    }).promise();
    core.info('Creating cleanup cluster...');

    try {
      await CF.waitFor('stackCreateComplete', { StackName: taskDefStackName }).promise();
    } catch (error) {
      core.error(error);
    }
    const taskDefResources = await CF.describeStackResources({
      StackName: taskDefStackName,
    }).promise();

    const baseResources = await CF.describeStackResources({ StackName: stackName }).promise();

    // in the future we should offer a parameter to choose if you want the guarnteed shutdown.
    core.info('Worker cluster created successfully (skipping wait for cleanup cluster to be ready)');

    return {
      taskDefStackName,
      taskDefCloudFormation,
      taskDefStackNameTTL: cleanupTaskDefStackName,
      ttlCloudFormation: cleanupCloudFormation,
      taskDefResources,
      baseResources,
      logid,
    };
  }

  static async runTask(taskDef, ECS, CF, environment, buildUid) {
    const cluster =
      taskDef.baseResources.StackResources?.find((x) => x.LogicalResourceId === 'ECSCluster')?.PhysicalResourceId || '';
    const taskDefinition =
      taskDef.taskDefResources.StackResources?.find((x) => x.LogicalResourceId === 'TaskDefinition')
        ?.PhysicalResourceId || '';
    const SubnetOne =
      taskDef.baseResources.StackResources?.find((x) => x.LogicalResourceId === 'PublicSubnetOne')
        ?.PhysicalResourceId || '';
    const SubnetTwo =
      taskDef.baseResources.StackResources?.find((x) => x.LogicalResourceId === 'PublicSubnetTwo')
        ?.PhysicalResourceId || '';
    const ContainerSecurityGroup =
      taskDef.baseResources.StackResources?.find((x) => x.LogicalResourceId === 'ContainerSecurityGroup')
        ?.PhysicalResourceId || '';
    const streamName =
      taskDef.taskDefResources.StackResources?.find((x) => x.LogicalResourceId === 'KinesisStream')
        ?.PhysicalResourceId || '';

    const task = await ECS.runTask({
      cluster,
      taskDefinition,
      platformVersion: '1.4.0',
      overrides: {
        containerOverrides: [
          {
            name: taskDef.taskDefStackName,
            environment: [...environment, { name: 'BUILDID', value: buildUid }],
          },
        ],
      },
      launchType: 'FARGATE',
      networkConfiguration: {
        awsvpcConfiguration: {
          subnets: [SubnetOne, SubnetTwo],
          assignPublicIp: 'ENABLED',
          securityGroups: [ContainerSecurityGroup],
        },
      },
    }).promise();

    core.info('Task is starting on worker cluster');
    const taskArn = task.tasks?.[0].taskArn || '';

    try {
      await ECS.waitFor('tasksRunning', { tasks: [taskArn], cluster }).promise();
    } catch (error) {
      await new Promise((resolve) => setTimeout(resolve, 3000));
      const describeTasks = await ECS.describeTasks({
        tasks: [taskArn],
        cluster,
      }).promise();
      core.info(`Task has ended ${describeTasks.tasks?.[0].containers?.[0].lastStatus}`);
      core.setFailed(error);
      core.error(error);
    }
    core.info(`Task is running on worker cluster`);
    await this.streamLogsUntilTaskStops(ECS, CF, taskDef, cluster, taskArn, streamName);
    await ECS.waitFor('tasksStopped', { cluster, tasks: [taskArn] }).promise();
    const exitCode = (
      await ECS.describeTasks({
        tasks: [taskArn],
        cluster,
      }).promise()
    ).tasks?.[0].containers?.[0].exitCode;
    if (exitCode !== 0) {
      try {
        await this.cleanupResources(CF, taskDef);
      } catch (error) {
        core.warning(`failed to cleanup ${error}`);
      }
      core.error(`job failed with exit code ${exitCode}`);
      throw new Error(`job failed with exit code ${exitCode}`);
    } else {
      core.info(`Task has finished successfully`);
    }
  }

  static async streamLogsUntilTaskStops(ECS: AWS.ECS, CF, taskDef, clusterName, taskArn, kinesisStreamName) {
    // watching logs
    const kinesis = new SDK.Kinesis();

    const getTaskData = async () => {
      const tasks = await ECS.describeTasks({
        cluster: clusterName,
        tasks: [taskArn],
      }).promise();
      return tasks.tasks?.[0];
    };

    const stream = await kinesis
      .describeStream({
        StreamName: kinesisStreamName,
      })
      .promise();

    let iterator =
      (
        await kinesis
          .getShardIterator({
            ShardIteratorType: 'TRIM_HORIZON',
            StreamName: stream.StreamDescription.StreamName,
            ShardId: stream.StreamDescription.Shards[0].ShardId,
          })
          .promise()
      ).ShardIterator || '';

    await CF.waitFor('stackCreateComplete', { StackName: taskDef.taskDefStackNameTTL }).promise();

    core.info(`Task status is ${(await getTaskData())?.lastStatus}`);

    const logBaseUrl = `https://${SDK.config.region}.console.aws.amazon.com/cloudwatch/home?region=${SDK.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
    core.info(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);

    let readingLogs = true;
    let timestamp: number = 0;
    while (readingLogs) {
      await new Promise((resolve) => setTimeout(resolve, 1500));
      const taskData = await getTaskData();
      if (taskData?.lastStatus !== 'RUNNING') {
        if (timestamp === 0) {
          core.info('Task stopped, streaming end of logs');
          timestamp = Date.now();
        }
        if (timestamp !== 0 && Date.now() - timestamp < 30000) {
          core.info('Task status is not RUNNING for 30 seconds, last query for logs');
          readingLogs = false;
        }
      }
      const records = await kinesis
        .getRecords({
          ShardIterator: iterator,
        })
        .promise();
      iterator = records.NextShardIterator || '';
      if (records.Records.length > 0 && iterator) {
        for (let index = 0; index < records.Records.length; index++) {
          const json = JSON.parse(
            zlib.gunzipSync(Buffer.from(records.Records[index].Data as string, 'base64')).toString('utf8'),
          );
          if (json.messageType === 'DATA_MESSAGE') {
            for (let logEventsIndex = 0; logEventsIndex < json.logEvents.length; logEventsIndex++) {
              if (json.logEvents[logEventsIndex].message.includes(taskDef.logid)) {
                core.info('End of task logs');
                readingLogs = false;
              } else {
                core.info(json.logEvents[logEventsIndex].message);
              }
            }
          }
        }
      }
    }
  }

  static async cleanupResources(CF, taskDef) {
    await CF.deleteStack({
      StackName: taskDef.taskDefStackName,
    }).promise();

    await CF.deleteStack({
      StackName: taskDef.taskDefStackNameTTL,
    }).promise();

    await CF.waitFor('stackDeleteComplete', {
      StackName: taskDef.taskDefStackName
|
||||||
|
}).promise();
|
||||||
|
|
||||||
|
// Currently too slow and causes too much waiting
|
||||||
|
await CF.waitFor('stackDeleteComplete', {
|
||||||
|
StackName: taskDef.taskDefStackNameTTL,
|
||||||
|
}).promise();
|
||||||
|
|
||||||
|
core.info('Cleanup complete');
|
||||||
|
}
|
||||||
|
|
||||||
|
static onlog(batch) {
|
||||||
|
for (const log of batch) {
|
||||||
|
core.info(`log: ${log}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
export default AWS;
|
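For orientation, here is a minimal caller sketch that is not part of this commit: it only wires together the static helpers shown above. The import path, the local name RemoteBuilder, and the runRemoteBuild wrapper are assumptions for illustration; the runTask and cleanupResources signatures are taken from the code above.

// Hypothetical caller sketch (not part of this diff); names noted below are assumed.
import * as SDK from 'aws-sdk';
import RemoteBuilder from './aws'; // the class exported above; path assumed

async function runRemoteBuild(
  taskDef, // the stack/task-definition object returned by the CloudFormation setup step above
  environment: { name: string; value: string }[],
  buildUid: string,
) {
  const ECS = new SDK.ECS();
  const CF = new SDK.CloudFormation();

  // runTask starts the Fargate task, waits for it, streams its Kinesis-backed logs,
  // and on a non-zero container exit code cleans up and throws. A caller therefore
  // only needs to remove the per-build stacks after a successful run.
  await RemoteBuilder.runTask(taskDef, ECS, CF, environment, buildUid);
  await RemoteBuilder.cleanupResources(CF, taskDef);
}

Note that streamLogsUntilTaskStops waits for the cleanup (TTL) stack to reach stackCreateComplete before it starts polling the Kinesis shard, which is why taskDefStackNameTTL is carried on the returned task-definition object.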
@@ -33,10 +33,12 @@ class BuildParameters
       androidKeyaliasName: Input.androidKeyaliasName,
       androidKeyaliasPass: Input.androidKeyaliasPass,
       customParameters: Input.customParameters,
+      remoteBuildCluster: Input.remoteBuildCluster,
+      awsStackName: Input.awsStackName,
       kubeConfig: Input.kubeConfig,
       githubToken: Input.githubToken,
-      kubeContainerMemory: Input.kubeContainerMemory,
-      kubeContainerCPU: Input.kubeContainerCPU,
+      remoteBuildMemory: Input.remoteBuildMemory,
+      remoteBuildCpu: Input.remoteBuildCpu,
       kubeVolumeSize: Input.kubeVolumeSize,
       kubeVolume: Input.kubeVolume,
     };
@@ -10,6 +10,7 @@ import Project from './project';
 import Unity from './unity';
 import Versioning from './versioning';
 import Kubernetes from './kubernetes';
+import AWS from './aws';
 
 export {
   Action,
@@ -24,4 +25,5 @@ export {
   Unity,
   Versioning,
   Kubernetes,
+  AWS,
 };
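With AWS now exported next to Kubernetes, the entry point can pick a backend from the remoteBuildCluster build parameter. That selection code is not included in this excerpt; the sketch below is only a plausible illustration of the idea, and the BuildRunner type, the runners argument, and the fall-back-to-local behaviour are assumptions.

// Plausible selection sketch (assumed, not shown in this diff): route a build based on
// the remoteBuildCluster parameter ('aws', 'k8s', or anything else meaning a local build).
type BuildRunner = () => Promise<void>;

function selectRunner(
  remoteBuildCluster: string,
  runners: { aws: BuildRunner; k8s: BuildRunner; local: BuildRunner },
): BuildRunner {
  switch (remoteBuildCluster) {
    case 'aws':
      return runners.aws; // e.g. backed by the AWS module exported above
    case 'k8s':
      return runners.k8s; // e.g. backed by the Kubernetes module exported above
    default:
      return runners.local; // build on the runner itself
  }
}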
@@ -85,6 +85,14 @@ class Input
     return core.getInput('customParameters') || '';
   }
 
+  static get remoteBuildCluster() {
+    return core.getInput('remoteBuildCluster') || '';
+  }
+
+  static get awsStackName() {
+    return core.getInput('awsStackName') || '';
+  }
+
   static get kubeConfig() {
     return core.getInput('kubeConfig') || '';
   }
@@ -93,12 +101,12 @@ class Input
     return core.getInput('githubToken') || '';
   }
 
-  static get kubeContainerMemory() {
-    return core.getInput('kubeContainerMemory') || '800M';
+  static get remoteBuildMemory() {
+    return core.getInput('remoteBuildMemory') || '800M';
   }
 
-  static get kubeContainerCPU() {
-    return core.getInput('kubeContainerCPU') || '0.25';
+  static get remoteBuildCpu() {
+    return core.getInput('remoteBuildCpu') || '0.25';
   }
 
   static get kubeVolumeSize() {
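As a quick sanity check on the renamed getters, the snippet below shows the defaults they fall back to when the corresponding action inputs are unset. It relies on the fact that @actions/core resolves core.getInput from INPUT_* environment variables; the import path for Input is assumed, and the snippet is illustrative rather than part of the repository's tests.

// Illustrative only (not part of this commit): exercising the getters above.
// @actions/core resolves core.getInput('foo') from the INPUT_FOO environment variable.
import Input from './input'; // path assumed

process.env.INPUT_REMOTEBUILDCLUSTER = 'aws';
// INPUT_REMOTEBUILDMEMORY and INPUT_REMOTEBUILDCPU deliberately left unset

console.log(Input.remoteBuildCluster); // 'aws'
console.log(Input.remoteBuildMemory); // falls back to '800M'
console.log(Input.remoteBuildCpu); // falls back to '0.25'
console.log(Input.awsStackName); // '' here; the action.yml default only applies when run as an action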
@@ -1,44 +1,3 @@
 {
-  "dependencies": {
-    "com.unity.collab-proxy": "1.2.16",
-    "com.unity.ext.nunit": "1.0.0",
-    "com.unity.ide.rider": "1.1.0",
-    "com.unity.ide.vscode": "1.1.2",
-    "com.unity.package-manager-ui": "2.2.0",
-    "com.unity.test-framework": "1.0.13",
-    "com.unity.textmeshpro": "2.0.1",
-    "com.unity.timeline": "1.1.0",
-    "com.unity.ugui": "1.0.0",
-    "com.unity.modules.ai": "1.0.0",
-    "com.unity.modules.androidjni": "1.0.0",
-    "com.unity.modules.animation": "1.0.0",
-    "com.unity.modules.assetbundle": "1.0.0",
-    "com.unity.modules.audio": "1.0.0",
-    "com.unity.modules.cloth": "1.0.0",
-    "com.unity.modules.director": "1.0.0",
-    "com.unity.modules.imageconversion": "1.0.0",
-    "com.unity.modules.imgui": "1.0.0",
-    "com.unity.modules.jsonserialize": "1.0.0",
-    "com.unity.modules.particlesystem": "1.0.0",
-    "com.unity.modules.physics": "1.0.0",
-    "com.unity.modules.physics2d": "1.0.0",
-    "com.unity.modules.screencapture": "1.0.0",
-    "com.unity.modules.terrain": "1.0.0",
-    "com.unity.modules.terrainphysics": "1.0.0",
-    "com.unity.modules.tilemap": "1.0.0",
-    "com.unity.modules.ui": "1.0.0",
-    "com.unity.modules.uielements": "1.0.0",
-    "com.unity.modules.umbra": "1.0.0",
-    "com.unity.modules.unityanalytics": "1.0.0",
-    "com.unity.modules.unitywebrequest": "1.0.0",
-    "com.unity.modules.unitywebrequestassetbundle": "1.0.0",
-    "com.unity.modules.unitywebrequestaudio": "1.0.0",
-    "com.unity.modules.unitywebrequesttexture": "1.0.0",
-    "com.unity.modules.unitywebrequestwww": "1.0.0",
-    "com.unity.modules.vehicles": "1.0.0",
-    "com.unity.modules.video": "1.0.0",
-    "com.unity.modules.vr": "1.0.0",
-    "com.unity.modules.wind": "1.0.0",
-    "com.unity.modules.xr": "1.0.0"
-  }
+  "dependencies": {}
 }
@@ -5,32 +5,30 @@ GraphicsSettings:
   m_ObjectHideFlags: 0
   serializedVersion: 12
   m_Deferred:
-    m_Mode: 1
-    m_Shader: {fileID: 69, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_DeferredReflections:
-    m_Mode: 1
-    m_Shader: {fileID: 74, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_ScreenSpaceShadows:
-    m_Mode: 1
-    m_Shader: {fileID: 64, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_LegacyDeferred:
-    m_Mode: 1
-    m_Shader: {fileID: 63, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_DepthNormals:
-    m_Mode: 1
-    m_Shader: {fileID: 62, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_MotionVectors:
-    m_Mode: 1
-    m_Shader: {fileID: 75, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_LightHalo:
-    m_Mode: 1
-    m_Shader: {fileID: 105, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
   m_LensFlare:
-    m_Mode: 1
-    m_Shader: {fileID: 102, guid: 0000000000000000f000000000000000, type: 0}
+    m_Mode: 0
+    m_Shader: {fileID: 0}
-  m_AlwaysIncludedShaders:
-  - {fileID: 10753, guid: 0000000000000000f000000000000000, type: 0}
-  - {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0}
+  m_AlwaysIncludedShaders: []
   m_PreloadedShaders: []
   m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000,
     type: 0}
@@ -43,15 +41,16 @@ GraphicsSettings:
   m_LightmapStripping: 0
   m_FogStripping: 0
   m_InstancingStripping: 0
-  m_LightmapKeepPlain: 1
-  m_LightmapKeepDirCombined: 1
-  m_LightmapKeepDynamicPlain: 1
-  m_LightmapKeepDynamicDirCombined: 1
-  m_LightmapKeepShadowMask: 1
-  m_LightmapKeepSubtractive: 1
+  m_LightmapKeepPlain: 0
+  m_LightmapKeepDirCombined: 0
+  m_LightmapKeepDynamicPlain: 0
+  m_LightmapKeepDynamicDirCombined: 0
+  m_LightmapKeepShadowMask: 0
+  m_LightmapKeepSubtractive: 0
   m_FogKeepLinear: 1
   m_FogKeepExp: 1
   m_FogKeepExp2: 1
   m_AlbedoSwatchInfos: []
   m_LightsUseLinearIntensity: 0
   m_LightsUseColorTemperature: 0
+  m_LogWhenShaderIsCompiled: 0