Inline Terraform
The Container Driver executes a container supplied as input by running it as a Kubernetes Job in a target Kubernetes cluster.
The example in this section shows:
- How to reference a config Resource Definition to provide the data needed to create a Kubernetes Job in the desired cluster.
- How to reference a config Resource Definition to create the Job with the proper configuration.
- How to make the Kubernetes Job able to pull an image from a private registry.
- How to inject the cloud account credentials into the IaC code running in the container via the credentials_config object.
The example is made up of these files:
- k8s-cluster-runner-config.yaml: provides a connection to a GKE cluster.
- agent-runner.yaml: provides the configuration to access a private cluster via the Humanitec Agent.
- s3.yaml: in addition to referencing the config Resource Definition, it defines the Terraform scripts to run to provision an S3 bucket whose name is produced by appending a random suffix to the application and environment names. The supplied scripts use an AWS S3 bucket to store the Terraform state.
Resource Definitions
agent-runner.yaml (view on GitHub):
# This Resource Definition specifies the Humanitec Agent to use for the Runner.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: agent-runner
entity:
  driver_type: humanitec/agent
  name: agent-runner
  type: agent
  driver_inputs:
    values:
      id: my-agent
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner
k8s-cluster-runner-config.yaml (view on GitHub):
# This Resource Definition provides configuration values for the Container Driver.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: config-container-driver
entity:
  name: config-container-driver
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    values:
      job:
        # Change to match the image you built to run the IaC of your choice
        image: ghcr.io/my-registry/container-driver-runner:1.0.1
        # Change to match the command to run your image or remove it if you want to use the image entrypoint
        command: ["/opt/container"]
        # Change to match the mount point of your shared directory
        shared_directory: /home/runneruser/workspace
        # Change to the namespace name you created to host the Kubernetes Job created by the Driver
        namespace: humanitec-runner
        # Change to the service account name with permissions to create secrets/configmaps in the Kubernetes Job namespace you created
        service_account: humanitec-runner-job
        # This assumes a secret with the given name exists in the desired namespace and contains the credentials to pull the job image from the private registry
        pod_template: |
          spec:
            imagePullSecrets:
              - name: ghcr-private-registry
      # Change to match the configuration of your target cluster
      cluster:
        cluster_type: gke
        account: my-org/my-gcp-cloud-account
        cluster:
          loadbalancer: 10.10.10.10
          name: my-cluster
          project_id: my-project
          zone: europe-west2
          internal_ip: true
    # Change to match the desired agent (if any)
    secret_refs:
      agent_url:
        value: ${resources['agent.default#agent'].outputs.url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner
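The job values above assume a few objects already exist in the target cluster: the namespace hosting the Jobs, the service account with permissions to create secrets/configmaps, and the image pull secret named in the pod_template. A minimal sketch of these prerequisites as plain Kubernetes manifests follows; the names match the configuration above, while the registry credentials and the exact RBAC verbs are assumptions to adapt to your setup:

# Namespace hosting the Kubernetes Jobs created by the Driver
apiVersion: v1
kind: Namespace
metadata:
  name: humanitec-runner
---
# Service account the Job runs as
apiVersion: v1
kind: ServiceAccount
metadata:
  name: humanitec-runner-job
  namespace: humanitec-runner
---
# RBAC granting the secrets/configmaps permissions mentioned above.
# The exact set of verbs required is an assumption.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: humanitec-runner-job
  namespace: humanitec-runner
rules:
  - apiGroups: [""]
    resources: ["secrets", "configmaps"]
    verbs: ["create", "get", "list", "update", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: humanitec-runner-job
  namespace: humanitec-runner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: humanitec-runner-job
subjects:
  - kind: ServiceAccount
    name: humanitec-runner-job
    namespace: humanitec-runner
---
# Pull secret referenced by the pod_template. The .dockerconfigjson
# content is a placeholder for your real registry credentials.
apiVersion: v1
kind: Secret
metadata:
  name: ghcr-private-registry
  namespace: humanitec-runner
type: kubernetes.io/dockerconfigjson
stringData:
  .dockerconfigjson: |
    {"auths":{"ghcr.io":{"auth":"<base64-encoded user:token>"}}}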
s3.yaml (view on GitHub):
# This Resource Definition specifies an s3 Resource to be provisioned through inline Terraform code.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: aws-s3
entity:
  name: aws-s3
  type: s3
  driver_type: humanitec/container
  driver_account: my-aws-cloud-account
  driver_inputs:
    values:
      job: ${resources['config.runner'].outputs.job}
      cluster:
        cluster_type: ${resources['config.runner'].outputs.cluster.cluster_type}
        account: ${resources['config.runner'].outputs.cluster.account}
        cluster: ${resources['config.runner'].outputs.cluster.cluster}
      # Needed to authenticate to the AWS Terraform provider in the code passed via the files inputs
      credentials_config:
        environment:
          AWS_ACCESS_KEY_ID: AccessKeyId
          AWS_SECRET_ACCESS_KEY: SecretAccessKey
      files:
        terraform.tfvars.json: |
          {"REGION": "eu-west-3", "BUCKET": "${context.app.id}-${context.env.id}"}
        # Change to match the backend of your choice
        backend.tf: |
          terraform {
            backend "s3" {
              bucket = "my-s3-to-store-tf-state"
              key    = "${context.res.guresid}/state/terraform.tfstate"
              region = "eu-west-3"
            }
          }
        providers.tf: |
          terraform {
            required_providers {
              aws = {
                source  = "hashicorp/aws"
                version = "~> 5.72.0"
              }
            }
          }
        vars.tf: |
          variable "REGION" {
            type = string
          }
          variable "BUCKET" {
            type = string
          }
        main.tf: |
          provider "aws" {
            region = var.REGION
            default_tags {
              tags = {
                CreatedBy = "Humanitec"
              }
            }
          }
          resource "random_string" "bucket_suffix" {
            length  = 5
            special = false
            upper   = false
          }
          module "aws_s3" {
            source = "terraform-aws-modules/s3-bucket/aws"
            bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
            acl    = "private"

            force_destroy            = true
            control_object_ownership = true
            object_ownership         = "BucketOwnerPreferred"
          }
          output "region" {
            value = module.aws_s3.s3_bucket_region
          }
          output "bucket" {
            value = module.aws_s3.s3_bucket_id
          }
    secret_refs:
      cluster:
        agent_url:
          value: ${resources['config.runner'].outputs.agent_url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
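Once these Resource Definitions are matched through their criteria, a workload in a matching environment can request an s3 resource and consume the bucket and region outputs produced by the Terraform code. A sketch of a Score file doing so, assuming the standard Score placeholder syntax; the workload name, image, and variable names are illustrative:

apiVersion: score.dev/v1b1
metadata:
  name: my-workload
containers:
  main:
    image: ghcr.io/my-registry/my-app:latest
    variables:
      # Resolved from the outputs of the aws-s3 Resource Definition
      BUCKET_NAME: ${resources.bucket.bucket}
      BUCKET_REGION: ${resources.bucket.region}
resources:
  bucket:
    type: s3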
agent-runner.tf (view on GitHub):
resource "humanitec_resource_definition" "agent-runner" {
driver_type = "humanitec/agent"
id = "agent-runner"
name = "agent-runner"
type = "agent"
driver_inputs = {
values_string = jsonencode({
"id" = "my-agent"
})
}
}
resource "humanitec_resource_definition_criteria" "agent-runner_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.agent-runner.id
env_type = "development"
class = "runner"
}
k8s-cluster-runner-config.tf (view on GitHub):
resource "humanitec_resource_definition" "config-container-driver" {
driver_type = "humanitec/echo"
id = "config-container-driver"
name = "config-container-driver"
type = "config"
driver_inputs = {
values_string = jsonencode({
"job" = {
"image" = "ghcr.io/my-registry/container-driver-runner:1.0.1"
"command" = [
"/opt/container"
]
"shared_directory" = "/home/runneruser/workspace"
"namespace" = "humanitec-runner"
"service_account" = "humanitec-runner-job"
"pod_template" = <<END_OF_TEXT
spec:
imagePullSecrets:
- name: ghcr-private-registry
END_OF_TEXT
}
"cluster" = {
"cluster_type" = "gke"
"account" = "my-org/my-gcp-cloud-account"
"cluster" = {
"loadbalancer" = "10.10.10.10"
"name" = "my-cluster"
"project_id" = "my-project"
"zone" = "europe-west2"
"internal_ip" = true
}
}
})
secret_refs = jsonencode({
"agent_url" = {
"value" = "$${resources['agent.default#agent'].outputs.url}"
}
})
}
}
resource "humanitec_resource_definition_criteria" "config-container-driver_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.config-container-driver.id
env_type = "development"
class = "runner"
}
s3.tf (view on GitHub):
resource "humanitec_resource_definition" "aws-s3" {
driver_type = "humanitec/container"
id = "aws-s3"
name = "aws-s3"
type = "s3"
driver_account = "my-aws-cloud-account"
driver_inputs = {
values_string = jsonencode({
"job" = "$${resources['config.runner'].outputs.job}"
"cluster" = {
"cluster_type" = "$${resources['config.runner'].outputs.cluster.cluster_type}"
"account" = "$${resources['config.runner'].outputs.cluster.account}"
"cluster" = "$${resources['config.runner'].outputs.cluster.cluster}"
}
"credentials_config" = {
"environment" = {
"AWS_ACCESS_KEY_ID" = "AccessKeyId"
"AWS_SECRET_ACCESS_KEY" = "SecretAccessKey"
}
}
"files" = {
"terraform.tfvars.json" = "{\"REGION\": \"eu-west-3\", \"BUCKET\": \"$${context.app.id}-$${context.env.id}\"}\n"
"backend.tf" = <<END_OF_TEXT
terraform {
backend "s3" {
bucket = "my-s3-to-store-tf-state"
key = "$${context.res.guresid}/state/terraform.tfstate"
region = "eu-west-3"
}
}
END_OF_TEXT
"providers.tf" = <<END_OF_TEXT
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.72.0"
}
}
}
END_OF_TEXT
"vars.tf" = <<END_OF_TEXT
variable "REGION" {
type = string
}
variable "BUCKET" {
type = string
}
END_OF_TEXT
"main.tf" = <<END_OF_TEXT
provider "aws" {
region = var.REGION
default_tags {
tags = {
CreatedBy = "Humanitec"
}
}
}
resource "random_string" "bucket_suffix" {
length = 5
special = false
upper = false
}
module "aws_s3" {
source = "terraform-aws-modules/s3-bucket/aws"
bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
acl = "private"
force_destroy = true
control_object_ownership = true
object_ownership = "BucketOwnerPreferred"
}
output "region" {
value = module.aws_s3.s3_bucket_region
}
output "bucket" {
value = module.aws_s3.s3_bucket_id
}
END_OF_TEXT
}
})
secret_refs = jsonencode({
"cluster" = {
"agent_url" = {
"value" = "$${resources['config.runner'].outputs.agent_url}"
}
}
})
}
}
resource "humanitec_resource_definition_criteria" "aws-s3_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.aws-s3.id
env_type = "development"
}
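The .tf variants above assume a configured Humanitec Terraform provider. A minimal sketch follows; the organization ID and token values are placeholders, typically supplied via variables or the environment variables supported by the provider:

terraform {
  required_providers {
    humanitec = {
      source = "humanitec/humanitec"
    }
  }
}

provider "humanitec" {
  org_id = "my-org"       # placeholder organization ID
  token  = "my-api-token" # placeholder API token
}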