Container Driver

Resource Definitions using the Container Driver

This section contains example Resource Definitions using the Container Driver.

The requirements to make these Resource Definitions work with the Orchestrator are called out in the comments and notes within each example.

Inline Terraform

The Container Driver executes a container supplied as input as part of a Kubernetes Job execution in a target Kubernetes cluster.

The example in this section shows:

  • How to reference a config Resource Definition to provide the data needed to create a Kubernetes Job in the desired cluster.
  • How to reference a config Resource Definition to create the job with the proper configuration.
  • How to make the Kubernetes Job able to pull an image from a private registry.
  • How to inject the cloud account credentials into the IaC code running in the container via the credentials_config object.

The example is made up of these files:

  • k8s-cluster-runner-config.yaml: provides a connection to an EKS cluster.
  • agent-runner.yaml: provides the configuration to access a private cluster via the Humanitec Agent.
  • s3.yaml: in addition to referencing the config Resource Definition, it defines the Terraform scripts to run to provision an S3 bucket whose name is produced by appending a random suffix to the application and environment names. The supplied scripts use an AWS S3 bucket to store the resource state.
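
For context, a Workload could consume the provisioned s3 Resource through its outputs. A minimal, hypothetical Score file requesting the Resource is shown below (the workload name, container name, and image are illustrative):

apiVersion: score.dev/v1b1
metadata:
  name: my-workload
containers:
  main:
    image: registry.example.com/my-app:1.0.0
    variables:
      # Filled from the outputs of the aws-s3 Resource Definition below
      AWS_REGION: ${resources.bucket.region}
      BUCKET_NAME: ${resources.bucket.bucket}
resources:
  bucket:
    type: s3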

agent-runner.tf (view on GitHub):

resource "humanitec_resource_definition" "agent-runner" {
  driver_type = "humanitec/agent"
  id             = "agent-runner"
  name           = "agent-runner"
  type           = "agent"
  driver_inputs  = {
    values_string  = jsonencode({
      "id" = "my-agent"
    })
  }
}

resource "humanitec_resource_definition_criteria" "agent-runner_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.agent-runner.id
  env_type               = "development"
  class                  = "runner"
}


agent-runner.yaml (view on GitHub):

# This Resource Definition specifies the Humanitec Agent to use for the Runner.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: agent-runner
entity:
  driver_type: humanitec/agent
  name: agent-runner
  type: agent
  driver_inputs:
    values:
      id: my-agent
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner      

k8s-cluster-runner-config.tf (view on GitHub):

resource "humanitec_resource_definition" "config-container-driver" {
  driver_type = "humanitec/echo"
  id             = "config-container-driver"
  name           = "config-container-driver"
  type           = "config"
  driver_inputs  = {
    values_string  = jsonencode({
      "job" = {
        "image" = "ghcr.io/my-registry/container-driver-runner:1.0.1"
        "command" = [
        "/opt/container"
        ]
        "shared_directory" = "/home/runneruser/workspace"
        "namespace"        = "humanitec-runner"
        "service_account"  = "humanitec-runner"
        "pod_template"     = <<END_OF_TEXT
spec:
  imagePullSecrets:
    - name: ghcr-private-registry
END_OF_TEXT
      }
      "cluster" = {
        "account" = "my-org/my-aws-cloud-account"
        "cluster" = {
          "cluster_type" = "eks"
          "loadbalancer" = "10.10.10.10"
          "name"         = "my-demo-cluster"
          "region"       = "eu-west-3"
        }
      }
    })
    secret_refs = jsonencode({
      "agent_url" = {
        "value" = "$${resources['agent.default#agent'].outputs.url}"
      }
    })
  }
}

resource "humanitec_resource_definition_criteria" "config-container-driver_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.config-container-driver.id
  env_type               = "development"
  class                  = "runner"
}


k8s-cluster-runner-config.yaml (view on GitHub):

# This Resource Definition provides configuration values for the Container Driver.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: config-container-driver
entity:
  name: config-container-driver
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    values:
      job:
        # Change to match the image you built to run the IaC of your choice
        image: ghcr.io/my-registry/container-driver-runner:1.0.1
        # Change to match the command to run your image or remove it if you want to use the image entrypoint
        command: ["/opt/container"]
        # Change to match the mount point of your shared directory
        shared_directory: /home/runneruser/workspace
        # Change to the namespace name you created to host the Kubernetes Job created by the Driver.
        namespace: humanitec-runner
        # Change to the service account name with permissions to create secrets/configmaps in the Kubernetes Job namespace you created.
        service_account: humanitec-runner
        # This assumes a secret with the given name exists in the desired namespace and it contains the credentials to pull the job image from the private registry.
        pod_template: |
          spec:
            imagePullSecrets:
              - name: ghcr-private-registry
      # Change to match the configuration of your target cluster
      cluster:
        account: my-org/my-aws-cloud-account
        cluster:
          cluster_type: eks
          loadbalancer: 10.10.10.10
          name: my-demo-cluster
          region: eu-west-3
    # Change to match the desired agent (if any)
    secret_refs:
      agent_url: 
        value: ${resources['agent.default#agent'].outputs.url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner
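
The namespace, service_account, and pod_template values above assume the corresponding Kubernetes objects already exist in the target cluster. A minimal sketch of that setup follows; all names and the registry credentials are illustrative, and the RBAC that lets the service account create Secrets and ConfigMaps in the namespace is omitted:

apiVersion: v1
kind: Namespace
metadata:
  name: humanitec-runner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: humanitec-runner
  namespace: humanitec-runner
---
# Image pull Secret referenced by the pod_template above. The
# .dockerconfigjson value must be the base64-encoded Docker config
# holding credentials for ghcr.io.
apiVersion: v1
kind: Secret
metadata:
  name: ghcr-private-registry
  namespace: humanitec-runner
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: <base64-encoded Docker config JSON>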


s3.tf (view on GitHub):

resource "humanitec_resource_definition" "aws-s3" {
  driver_type    = "humanitec/container"
  id             = "aws-s3"
  name           = "aws-s3"
  type           = "s3"
  driver_account = "my-aws-cloud-account"
  driver_inputs = {
    values_string = jsonencode({
      "job"     = "$${resources['config.runner'].outputs.job}"
      "cluster" = "$${resources['config.runner'].outputs.cluster}"
      "credentials_config" = {
        "environment" = {
          "AWS_ACCESS_KEY_ID"     = "AccessKeyId"
          "AWS_SECRET_ACCESS_KEY" = "SecretAccessKey"
        }
      }
      "files" = {
        "terraform.tfvars.json" = "{\"REGION\": \"eu-west-3\", \"BUCKET\": \"$${context.app.id}-$${context.env.id}\"}\n"
        "backend.tf" = <<END_OF_TEXT
terraform {
  backend "s3" {
    bucket = "my-s3-to-store-tf-state"
    key = "$${context.res.guresid}/state/terraform.tfstate"
    region = "eu-west-3"
  }
}
END_OF_TEXT
        "providers.tf" = <<END_OF_TEXT
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~> 5.72.0"
    }
  }
}
END_OF_TEXT
        "vars.tf" = <<END_OF_TEXT
variable "REGION" {
    type = string
}

variable "BUCKET" {
    type = string
}
END_OF_TEXT
        "main.tf" = <<END_OF_TEXT
provider "aws" {
  region     = var.REGION
  default_tags {
    tags = {
      CreatedBy = "Humanitec"
    }
  }
}

resource "random_string" "bucket_suffix" {
  length           = 5
  special          = false
  upper            = false
}

module "aws_s3" {
  source = "terraform-aws-modules/s3-bucket/aws"
  bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
  acl    = "private"
  force_destroy = true
  control_object_ownership = true
  object_ownership         = "BucketOwnerPreferred"
}

output "region" {
  value = module.aws_s3.s3_bucket_region
}

output "bucket" {
  value = module.aws_s3.s3_bucket_id
}
END_OF_TEXT
      }
    })
    secret_refs = jsonencode({
      "cluster" = {
        "agent_url" = {
          "value" = "$${resources['config.runner'].outputs.agent_url}"
        }
      }
    })
  }
}

resource "humanitec_resource_definition_criteria" "aws-s3_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.aws-s3.id
  env_type               = "development"
}


s3.yaml (view on GitHub):

# This Resource Definition specifies an `s3` Resource to be provisioned through inline Terraform code.
apiVersion: entity.humanitec.io/v1b1

kind: Definition
metadata:
  id: aws-s3
entity:
  name: aws-s3
  type: s3
  driver_type: humanitec/container
  driver_account: my-aws-cloud-account
  driver_inputs:
    values:
      job: ${resources['config.runner'].outputs.job}
      cluster: ${resources['config.runner'].outputs.cluster}
      # Needed to authenticate to the AWS Terraform provider in the TF code passed via the files inputs
      credentials_config:
        environment:
          AWS_ACCESS_KEY_ID: AccessKeyId
          AWS_SECRET_ACCESS_KEY: SecretAccessKey
      files:
        terraform.tfvars.json: |
          {"REGION": "eu-west-3", "BUCKET": "${context.app.id}-${context.env.id}"}
        # Change to match the backend of your choice.
        backend.tf: |
          terraform {
            backend "s3" {
              bucket = "my-s3-to-store-tf-state"
              key = "${context.res.guresid}/state/terraform.tfstate"
              region = "eu-west-3"
            }
          }
        providers.tf: |
          terraform {
            required_providers {
              aws = {
                source = "hashicorp/aws"
                version = "~> 5.72.0"
              }
            }
          }
        vars.tf: |
          variable "REGION" {
              type = string
          }

          variable "BUCKET" {
              type = string
          }
        main.tf: |
          provider "aws" {
            region     = var.REGION
            default_tags {
              tags = {
                CreatedBy = "Humanitec"
              }
            }
          }

          resource "random_string" "bucket_suffix" {
            length           = 5
            special          = false
            upper            = false
          }

          module "aws_s3" {
            source = "terraform-aws-modules/s3-bucket/aws"
            bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
            acl    = "private"
            force_destroy = true
            control_object_ownership = true
            object_ownership         = "BucketOwnerPreferred"
          }

          output "region" {
            value = module.aws_s3.s3_bucket_region
          }

          output "bucket" {
            value = module.aws_s3.s3_bucket_id
          }
    secret_refs:
      cluster:
        agent_url: 
          value: ${resources['config.runner'].outputs.agent_url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development


Private Git repo

The Container Driver executes a container supplied as input as part of a Kubernetes Job execution in a target Kubernetes cluster.

The example in this section shows:

  • How to reference a config Resource Definition to provide the data needed to create a Kubernetes Job in the desired cluster.
  • How to reference a config Resource Definition to create the job with the proper configuration.
  • How to make the Kubernetes Job able to pull an image from a private registry.
  • How to inject the cloud account credentials into the IaC code running in the container via the credentials_config object.
  • How to fetch the IaC scripts from a private repository, via non-secret and secret fields.

The example is made up of these files:

  • k8s-cluster-runner-config.yaml: provides a connection to an EKS cluster.
  • agent-runner.yaml: provides the configuration to access a private cluster via the Humanitec Agent.
  • s3.yaml: in addition to referencing the config Resource Definition, it defines how to fetch the Terraform scripts that provision an S3 bucket from a private GitHub repository. It also provides, via the files input, a backend configuration pointing at an AWS S3 bucket where the resource state is stored.

agent-runner.tf (view on GitHub):

resource "humanitec_resource_definition" "agent-runner" {
  driver_type = "humanitec/agent"
  id             = "agent-runner"
  name           = "agent-runner"
  type           = "agent"
  driver_inputs  = {
    values_string  = jsonencode({
      "id" = "my-agent"
    })
  }
}

resource "humanitec_resource_definition_criteria" "agent-runner_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.agent-runner.id
  env_type               = "development"
  class                  = "runner"
}


agent-runner.yaml (view on GitHub):

# This Resource Definition specifies the Humanitec Agent to use for the Runner.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: agent-runner
entity:
  driver_type: humanitec/agent
  name: agent-runner
  type: agent
  driver_inputs:
    values:
      id: my-agent
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner      

k8s-cluster-runner-config.tf (view on GitHub):

resource "humanitec_resource_definition" "config-container-driver" {
  driver_type = "humanitec/echo"
  id          = "config-container-driver"
  name        = "config-container-driver"
  type        = "config"
  driver_inputs = {
    values_string = jsonencode({
      "job" = {
        "image" = "ghcr.io/my-registry/container-driver-runner:1.0.1"
        "command" = [
          "/opt/container"
        ]
        "shared_directory" = "/home/runneruser/workspace"
        "namespace"        = "humanitec-runner"
        "service_account"  = "humanitec-runner"
        "pod_template"     = <<END_OF_TEXT
spec:
  imagePullSecrets:
    - name: ghcr-private-registry
END_OF_TEXT
      }
      "cluster" = {
        "account" = "my-org/my-aws-cloud-account"
        "cluster" = {
          "cluster_type" = "eks"
          "loadbalancer" = "10.10.10.10"
          "name"         = "my-demo-cluster"
          "region"       = "eu-west-3"
        }
      }
    })
    secret_refs = jsonencode({
      "agent_url" = {
        "value" = "$${resources['agent.default#agent'].outputs.url}"
      }
    })
  }
}

resource "humanitec_resource_definition_criteria" "config-container-driver_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.config-container-driver.id
  env_type               = "development"
  class                  = "runner"
}


k8s-cluster-runner-config.yaml (view on GitHub):

# This Resource Definition provides configuration values for the Container Driver.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: config-container-driver
entity:
  name: config-container-driver
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    values:
      job:
        # Change to match the image you built to run the IaC of your choice
        image: ghcr.io/my-registry/container-driver-runner:1.0.1
        # Change to match the command to run your image or remove it if you want to use the image entrypoint
        command: ["/opt/container"]
        # Change to match the mount point of your shared directory
        shared_directory: /home/runneruser/workspace
        # Change to the namespace name you created to host the Kubernetes Job created by the Driver.
        namespace: humanitec-runner
        # Change to the service account name with permissions to create secrets/configmaps in the Kubernetes Job namespace you created.
        service_account: humanitec-runner
        # This assumes a secret with the given name exists in the desired namespace and it contains the credentials to pull the job image from the private registry.
        pod_template: |
          spec:
            imagePullSecrets:
              - name: ghcr-private-registry
      # Change to match the configuration of your target cluster
      cluster:
        account: my-org/my-aws-cloud-account
        cluster:
          cluster_type: eks
          loadbalancer: 10.10.10.10
          name: my-demo-cluster
          region: eu-west-3
    # Change to match the desired agent (if any)
    secret_refs:
      agent_url: 
        value: ${resources['agent.default#agent'].outputs.url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      class: runner


s3.tf (view on GitHub):

resource "humanitec_resource_definition" "aws-s3" {
  driver_type    = "humanitec/container"
  id             = "aws-s3"
  name           = "aws-s3"
  type           = "s3"
  driver_account = "my-aws-cloud-account"
  driver_inputs = {
    values_string = jsonencode({
      "job"     = "$${resources['config.runner'].outputs.job}"
      "cluster" = "$${resources['config.runner'].outputs.cluster}"
      "credentials_config" = {
        "environment" = {
          "AWS_ACCESS_KEY_ID"     = "AccessKeyId"
          "AWS_SECRET_ACCESS_KEY" = "SecretAccessKey"
        }
      }
      "source" = {
        "ref" = "refs/heads/main"
        "url" = "[email protected]:my-org/my-repo.git"
      }
      "files" = {
        "terraform.tfvars.json" = "{\"REGION\": \"eu-west-3\", \"BUCKET\": \"$${context.app.id}-$${context.env.id}\"}\n"
        "backend.tf"            = <<END_OF_TEXT
terraform {
  backend "s3" {
    bucket = "my-s3-to-store-tf-state"
    key = "$${context.res.guresid}/state/terraform.tfstate"
    region = "eu-west-3"
  }
}
END_OF_TEXT
      }
    })
    secret_refs = jsonencode({
      "cluster" = {
        "agent_url" = {
          "value" = "$${resources['config.runner'].outputs.agent_url}"
        }
      }
      "source" = {
        "ssh_key" = {
          "store" = "my-secret-store"
          "ref"   = "my-path-to-git-ssh-key"
        }
      }
    })
  }
}

resource "humanitec_resource_definition_criteria" "aws-s3_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.aws-s3.id
  env_type               = "development"
}


s3.yaml (view on GitHub):

# This Resource Definition specifies an `s3` Resource to be provisioned through Terraform code read from a private Git repository accessed via an SSH key.
apiVersion: entity.humanitec.io/v1b1

kind: Definition
metadata:
  id: aws-s3
entity:
  name: aws-s3
  type: s3
  driver_type: humanitec/container
  driver_account: my-aws-cloud-account
  driver_inputs:
    values:
      job: ${resources['config.runner'].outputs.job}
      cluster: ${resources['config.runner'].outputs.cluster}
      # Needed to authenticate to the AWS Terraform provider in the TF code passed via the files inputs
      credentials_config:
        environment:
          AWS_ACCESS_KEY_ID: AccessKeyId
          AWS_SECRET_ACCESS_KEY: SecretAccessKey
      # Change to match your repository
      source:
        ref: refs/heads/main
        url: git@github.com:my-org/my-repo.git
        # When using a GitHub personal access token, use the HTTPS URL:
        # url: https://github.com/my-org/my-repo.git
      files:
        terraform.tfvars.json: |
          {"REGION": "eu-west-3", "BUCKET": "${context.app.id}-${context.env.id}"}
        # Change to match the backend of your choice.
        backend.tf: |
          terraform {
            backend "s3" {
              bucket = "my-s3-to-store-tf-state"
              key = "${context.res.guresid}/state/terraform.tfstate"
              region = "eu-west-3"
            }
          }
    secret_refs:
      cluster:
        agent_url: 
          value: ${resources['config.runner'].outputs.agent_url}
      # Change to match where your ssh key is stored
      source:
        ssh_key:
          store: my-secret-store
          ref: my-path-to-git-ssh-key
        # Alternative to ssh_key: password or Personal Access Token
        # password:
        #   store: my-secret-store
        #   ref: my-path-to-git-password-or-token
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
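
The ssh_key referenced above is the private half of an SSH key pair with read access to the repository. As a sketch, such a key pair could be generated as follows, with the public half registered as a read-only deploy key on the repository (the file name and comment are illustrative):

ssh-keygen -t ed25519 -C "container-driver-deploy-key" -f ./container-driver-deploy-key -N ''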


Secrets

This example shows how to inject secret values into a container run by the Container Driver from a downstream Resource in the Graph.

The same pattern applies to any Virtual Driver that wraps the Container Driver.

The example consists of these files:

  • mysql.yaml: A Resource Definition of type mysql using the Container Driver. It uses an external Git repository to retrieve some IaC code, e.g. Terraform, for execution. The key element of this example is the setting of environment variables for the Container Driver runner, shown in the snippet below. The variable values are obtained as secret outputs from a mysql-instance Resource:
entity:
  driver_inputs:
    secret_refs:
      job:
        variables:
          TF_VAR_...
  • mysql-instance.yaml: A Resource Definition of type mysql-instance using the Echo Driver, so the Orchestrator is not managing the instance but just providing access data to the upstream mysql Resource.
  • runner-config.yaml: Externalized configuration values for the Container Driver in a config Resource Definition.

mysql-instance.yaml (view on GitHub):

# This Resource Definition uses the Echo Driver to represent a mysql-instance
# without managing it.
# In particular, it returns secret outputs for username and password, to be
# consumed by other upstream resources in the Graph.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: container-driver-secrets-example-mysql-instance
entity:
  name: container-driver-secrets-example-mysql-instance
  type: mysql-instance
  driver_type: humanitec/echo
  driver_inputs:
    values:
      name: my-instance
      host: products.mysql.dev.example.com
      port: 3306
    secret_refs:
      # Read secret values from a secret store
      # which needs to be configured on the target cluster
      username:
        store: my-secret-store
        ref: my-instance-username
      password:
        store: my-secret-store
        ref: my-instance-password
  # Adjust matching criteria as required
  criteria:
  - app_id: container-driver-secrets-example

mysql.yaml (view on GitHub):

# This Resource Definition shows how to inject secret values into a container run by the Container Driver
# to create a MySQL database in a MySQL instance. It does not show how to actually create the database.
# It reads secret values from a downstream "mysql-instance" Resource and shows how to inject them
# as environment variables into the runner container.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: container-driver-secrets-example-mysql
entity:
  name: container-driver-secrets-example-mysql
  type: mysql
  driver_type: humanitec/container
  driver_account: my-aws-cloud-account
  driver_inputs:
    values:
      # Source job and cluster configuration from a config resource
      job: ${resources['config.runner'].outputs.job}
      cluster: ${resources['config.runner'].outputs.cluster}
      # Needed to authenticate to the AWS Terraform provider in the TF code passed via the files inputs
      # These values are provided via the `driver_account` configured above
      credentials_config:
        environment:
          AWS_ACCESS_KEY_ID: AccessKeyId
          AWS_SECRET_ACCESS_KEY: SecretAccessKey
      # Change to match your IaC code repository
      source:
        ref: refs/heads/main
        url: https://github.com/my-org/my-repo.git
    # All references to secret outputs of another resource MUST be placed in the `secret_refs` section
    secret_refs:
      job:
        # Setting environment variables in the container to be picked up by Terraform code
        variables:
          TF_VAR_mysql-instance-username:
            value: ${resources['mysql-instance.default'].outputs.username}
          TF_VAR_mysql-instance-password:
            value: ${resources['mysql-instance.default'].outputs.password}
      source:
        # Read a GitHub Personal Access Token for repo access from a secret store
        password:
          store: my-secret-store
          ref: my-path-to-git-token
  # Adjust matching criteria as required
  criteria:
  - app_id: container-driver-secrets-example
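
The TF_VAR_ environment variables set above surface as Terraform input variables inside the runner container. As a sketch, the IaC code in the referenced repository would declare matching variables like this (a hypothetical vars.tf; TF_VAR_<name> maps to the input variable named <name>):

# Hypothetical vars.tf in the external IaC repository
variable "mysql-instance-username" {
  type = string
}

variable "mysql-instance-password" {
  type      = string
  sensitive = true
}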



runner-config.yaml (view on GitHub):

# This Resource Definition provides configuration values for the Container Driver
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: container-driver-secrets-example-config
entity:
  name: container-driver-secrets-example-config
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    values:
      job:
        # Change to match the image you built to run the IaC of your choice
        image: ghcr.io/my-registry/container-driver-runner:1.0.1
        # Change to match the command to run your image or remove it if you want to use the image entrypoint
        command: ["/opt/container"]
        # Change to match the mount point of your shared directory
        shared_directory: /home/runneruser/workspace
      # Change to match the configuration of your target cluster
      cluster:
        account: my-org/my-aws-cloud-account
        cluster:
          cluster_type: eks
          loadbalancer: 10.10.10.10
          name: my-demo-cluster
          region: eu-west-3
    # Change to match the desired agent (if any)
    secret_refs:
      agent_url: 
        value: ${resources['agent.default#agent'].outputs.url}
  # Adjust matching criteria as required
  criteria:
  - app_id: container-driver-secrets-example
    class: runner

Terraform

The Container Driver executes a container supplied as input as part of a Kubernetes Job execution in a target Kubernetes cluster.

The example in this section shows:

  • How to use the official HashiCorp-maintained Terraform image with the Container Driver.
  • How to inject the cloud account credentials into the IaC code running in the container via the credentials_config object.

The example is made up of these files:

  • s3.yaml: alongside the Terraform code, a shell script called run.sh is included. It invokes the terraform command in the image.

This example requires:

  • A managed cluster: one of AKS, EKS, or GKE.
  • A Humanitec Agent configured to match the res_id runner (see the sketch after the note below).

NOTE

Due to the HashiCorp license change for Terraform, Humanitec is unable to provide examples of using the Container Driver with versions of the hashicorp/terraform image higher than 1.5.7, which is the last version released under the MPL 2.0.
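
A hypothetical Agent Resource Definition matching that res_id, modeled on the agent-runner.yaml shown in the previous examples, could look like this:

apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: agent-runner
entity:
  driver_type: humanitec/agent
  name: agent-runner
  type: agent
  driver_inputs:
    values:
      id: my-agent
  criteria:
    - res_id: runner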


s3.tf (view on GitHub):

resource "humanitec_resource_definition" "aws-s3" {
  driver_type    = "humanitec/container"
  id             = "aws-s3"
  name           = "aws-s3"
  type           = "s3"
  driver_account = "aws-ref-arch"
  driver_inputs = {
    values_string = jsonencode({
      "job" = {
        "image" = "hashicorp/terraform:1.5.7"
        "command" = [
          "/bin/sh",
          "/home/runneruser/workspace/run.sh"
        ]
        "shared_directory" = "/home/runneruser/workspace"
        "namespace"        = "humanitec-runner"
        "service_account"  = "humanitec-runner"
      }
      "cluster" = {
        "account" = "my-org/my-aws-cloud-account"
        "cluster" = "$${resources[\"k8s-cluster.default#k8s-cluster\"].values}"
      }
      "credentials_config" = {
        "environment" = {
          "AWS_ACCESS_KEY_ID"     = "AccessKeyId"
          "AWS_SECRET_ACCESS_KEY" = "SecretAccessKey"
          "AWS_SESSION_TOKEN"     = "SessionToken"
        }
      }
      "files" = {
        "run.sh"                = <<END_OF_TEXT
#!/bin/sh

# NOTE: This script is written to be POSIX shell compatible.

# run_cmd runs the command provided in its input args.
# If the command fails, STDERR from the command is written to ERROR_FILE and
# also to STDERR, and the script exits with exit code 1.
run_cmd ()
{
    if ! "$@" 2> "$\{ERROR_FILE}"
    then
        echo
        echo "FAILED: $@"
        cat "$\{ERROR_FILE}" 1>&2
        exit 1
    fi
}

if ! [ -d "$\{SCRIPTS_DIRECTORY}" ]
then
    echo "SCRIPTS_DIRECTORY does not exist: \"$\{SCRIPTS_DIRECTORY}"\" > "$\{ERROR_FILE}"
    cat "$\{ERROR_FILE}" 1>&2
    exit 1
fi

run_cmd cd "$\{SCRIPTS_DIRECTORY}"

if [ "$\{ACTION}" = "create" ]
then
    run_cmd terraform init -no-color

    run_cmd terraform apply -auto-approve -input=false -no-color
    
    # Terraform can export its outputs with the following schema:
    #   {
    #     "output-name": {
    #       "sensitive": bool: true of output is marked sensitive
    #       "type": string: the Terraform type of the output
    #       "value": any: the JSON representation of the output value
    #     },
    #     ...
    #   }
    # 
    # The Container Driver expects a simple map of output-name: value
    #
    # The hashicorp/terraform image does not provide any special tooling to
    # manipulate JSON. However, terraform accepts inputs as JSON, so an
    # additional run of the terraform CLI can be used to manipulate the JSON.

    # Create a new directory to convert the JSON outputs.
    mkdir output_parse_container

    # Generate a tfvars file in JSON format without any JSON tooling with a
    # single variable of "in".
    echo '{"in":' > output_parse_container/terraform.tfvars.json
    run_cmd terraform output -json >> output_parse_container/terraform.tfvars.json
    echo '}' >> output_parse_container/terraform.tfvars.json

    # Move to a different directory and therefore a different terraform context.
    # This means if we run terraform apply, it will be independent of the main
    # request above.
    run_cmd cd output_parse_container

    # Inject the Terraform code that converts the input into 2 separate maps
    # depending on whether the value is marked as sensitive or not.
    echo 'variable "in" { type = map }
output "values" { value = {for k, v in var.in: k => v.value if !v.sensitive} }
output "secrets" { value = {for k, v in var.in: k => v.value if v.sensitive} }' > parse.tf

    echo
    echo "Converting outputs using terraform apply"

    # This terraform apply is just operating on the terraform.tfvars.json
    # created above. It is running in a new terraform context and so will
    # not influence the infrastructure deployed above
    # Note: no need to run terraform init as no providers are required
    run_cmd terraform apply -auto-approve -input=false -no-color > /dev/null

    run_cmd terraform output -json values > "$\{OUTPUTS_FILE}"

    run_cmd terraform output -json secrets > "$\{SECRET_OUTPUTS_FILE}"

    echo "Done."

elif [ "$\{ACTION}" = "destroy" ]
then
    run_cmd terraform init -no-color

    run_cmd terraform destroy -auto-approve -input=false -no-color

else
  echo "unrecognized ACTION: \"$\{ACTION}"\" > "$\{ERROR_FILE}"
  cat "$\{ERROR_FILE}" 1>&2
  exit 1
fi
END_OF_TEXT
        "terraform.tfvars.json" = "{\"REGION\": \"eu-west-3\", \"BUCKET\": \"$${context.app.id}-$${context.env.id}\"}\n"
        "backend.tf"            = <<END_OF_TEXT
terraform {
  backend "s3" {
    bucket = "my-s3-to-store-tf-state"
    key = "$${context.res.guresid}/state/terraform.tfstate"
    region = "eu-central-1"
  }
}
END_OF_TEXT
        "providers.tf"          = <<END_OF_TEXT
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~> 5.72.0"
    }
  }
}
END_OF_TEXT
        "vars.tf"               = <<END_OF_TEXT
variable "REGION" {
    type = string
}

variable "BUCKET" {
    type = string
}
END_OF_TEXT
        "main.tf"               = <<END_OF_TEXT
provider "aws" {
  region     = var.REGION
  default_tags {
    tags = {
      CreatedBy = "Humanitec"
    }
  }
}

resource "random_string" "bucket_suffix" {
  length           = 5
  special          = false
  upper            = false
}

module "aws_s3" {
  source = "terraform-aws-modules/s3-bucket/aws"
  bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
  acl    = "private"
  force_destroy = true
  control_object_ownership = true
  object_ownership         = "BucketOwnerPreferred"
}

output "region" {
  value = module.aws_s3.s3_bucket_region
}

output "bucket" {
  value = module.aws_s3.s3_bucket_id
}
END_OF_TEXT
      }
    })
    secret_refs = jsonencode({
      "cluster" = {
        "agent_url" = {
          "value" = "$${resources['agent.default#runner'].outputs.url}"
        }
      }
    })
  }
}

resource "humanitec_resource_definition_criteria" "aws-s3_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.aws-s3.id
  env_type               = "development"
  app_id                 = "container-test"
}


s3.yaml (view on GitHub):

# This Resource Definition specifies an `s3` Resource to be provisioned through inline Terraform code.
apiVersion: entity.humanitec.io/v1b1

kind: Definition
metadata:
  id: aws-s3
entity:
  name: aws-s3
  type: s3
  driver_type: humanitec/container
  driver_account: aws-ref-arch
  driver_inputs:
    values:
      job: 
        # Due to the HashiCorp BSL license, Humanitec cannot provide
        # examples using any of the BSL-covered versions of terraform,
        # i.e. versions higher than 1.5.7
        image: hashicorp/terraform:1.5.7

        command: ["/bin/sh", "/home/runneruser/workspace/run.sh"]
        shared_directory: /home/runneruser/workspace

        # Change to the namespace name you created to host the Kubernetes Job created by the Driver.
        namespace: humanitec-runner
        # Change to the service account name with permissions to create secrets/configmaps in the Kubernetes Job namespace you created.
        service_account: humanitec-runner
      cluster:
        # Update to your cloud account
        account: my-org/my-aws-cloud-account
        cluster: ${resources["k8s-cluster.default#k8s-cluster"].values}

      # Needed to authenticate to the AWS Terraform provider in the TF code passed via the files inputs
      credentials_config:
        environment:
          AWS_ACCESS_KEY_ID: AccessKeyId
          AWS_SECRET_ACCESS_KEY: SecretAccessKey
          AWS_SESSION_TOKEN: SessionToken
      files:
        run.sh: |
          #!/bin/sh

          # NOTE: This script is written to be POSIX shell compatible.

          # run_cmd runs the command provided in its input args.
          # If the command fails, STDERR from the command is written to ERROR_FILE and
          # also to STDERR, and the script exits with exit code 1.
          run_cmd ()
          {
              if ! "$@" 2> "$\{ERROR_FILE}"
              then
                  echo
                  echo "FAILED: $@"
                  cat "$\{ERROR_FILE}" 1>&2
                  exit 1
              fi
          }

          if ! [ -d "$\{SCRIPTS_DIRECTORY}" ]
          then
              echo "SCRIPTS_DIRECTORY does not exist: \"$\{SCRIPTS_DIRECTORY}"\" > "$\{ERROR_FILE}"
              cat "$\{ERROR_FILE}" 1>&2
              exit 1
          fi

          run_cmd cd "$\{SCRIPTS_DIRECTORY}"

          if [ "$\{ACTION}" = "create" ]
          then
              run_cmd terraform init -no-color

              run_cmd terraform apply -auto-approve -input=false -no-color
              
              # Terraform can export its outputs with the following schema:
              #   {
              #     "output-name": {
              #       "sensitive": bool: true of output is marked sensitive
              #       "type": string: the Terraform type of the output
              #       "value": any: the JSON representation of the output value
              #     },
              #     ...
              #   }
              # 
              # The Container Driver expects a simple map of output-name: value
              #
              # The hashicorp/terraform image does not provide any special tooling to
              # manipulate JSON. However, terraform accepts inputs as JSON, so an
              # additional run of the terraform CLI can be used to manipulate the JSON.

              # Create a new directory to convert the JSON outputs.
              mkdir output_parse_container

              # Generate a tfvars file in JSON format without any JSON tooling with a
              # single variable of "in".
              echo '{"in":' > output_parse_container/terraform.tfvars.json
              run_cmd terraform output -json >> output_parse_container/terraform.tfvars.json
              echo '}' >> output_parse_container/terraform.tfvars.json

              # Move to a different directory and therefore a different terraform context.
              # This means if we run terraform apply, it will be independent of the main
              # request above.
              run_cmd cd output_parse_container

              # Inject the Terraform code that converts the input into 2 separate maps
              # depending on whether the value is marked as sensitive or not.
              echo 'variable "in" { type = map }
          output "values" { value = {for k, v in var.in: k => v.value if !v.sensitive} }
          output "secrets" { value = {for k, v in var.in: k => v.value if v.sensitive} }' > parse.tf

              echo
              echo "Converting outputs using terraform apply"

              # This terraform apply is just operating on the terraform.tfvars.json
              # created above. It is running in a new terraform context and so will
              # not influence the infrastructure deployed above
              # Note: no need to run terraform init as no providers are required
              run_cmd terraform apply -auto-approve -input=false -no-color > /dev/null

              run_cmd terraform output -json values > "$\{OUTPUTS_FILE}"

              run_cmd terraform output -json secrets > "$\{SECRET_OUTPUTS_FILE}"

              echo "Done."

          elif [ "$\{ACTION}" = "destroy" ]
          then
              run_cmd terraform init -no-color

              run_cmd terraform destroy -auto-approve -input=false -no-color

          else
            echo "unrecognized ACTION: \"$\{ACTION}"\" > "$\{ERROR_FILE}"
            cat "$\{ERROR_FILE}" 1>&2
            exit 1
          fi

        terraform.tfvars.json: |
          {"REGION": "eu-west-3", "BUCKET": "${context.app.id}-${context.env.id}"}
        # Change to match the backend of your choice.
        backend.tf: |
          terraform {
            backend "s3" {
              bucket = "my-s3-to-store-tf-state"
              key = "${context.res.guresid}/state/terraform.tfstate"
              region = "eu-central-1"
            }
          }
        providers.tf: |
          terraform {
            required_providers {
              aws = {
                source = "hashicorp/aws"
                version = "~> 5.72.0"
              }
            }
          }
        vars.tf: |
          variable "REGION" {
              type = string
          }

          variable "BUCKET" {
              type = string
          }
        main.tf: |
          provider "aws" {
            region     = var.REGION
            default_tags {
              tags = {
                CreatedBy = "Humanitec"
              }
            }
          }

          resource "random_string" "bucket_suffix" {
            length           = 5
            special          = false
            upper            = false
          }

          module "aws_s3" {
            source = "terraform-aws-modules/s3-bucket/aws"
            bucket = format("%s-%s", var.BUCKET, random_string.bucket_suffix.result)
            acl    = "private"
            force_destroy = true
            control_object_ownership = true
            object_ownership         = "BucketOwnerPreferred"
          }

          output "region" {
            value = module.aws_s3.s3_bucket_region
          }

          output "bucket" {
            value = module.aws_s3.s3_bucket_id
          }
    secret_refs:
      cluster:
        agent_url: 
          value: ${resources['agent.default#runner'].outputs.url}
  criteria:
    # Change to match the name of the environment type you want this to apply to
    - env_type: development
      app_id: container-test

