Template Driver
Resource Definitions using the Template Driver
This section contains example Resource Definitions using the Template Driver.
Add sidecar
Add a sidecar to workloads using the workload resource
The workload Resource Type can be used to make updates to resources before they are deployed into the cluster. In this example, a Resource Definition implementing the workload Resource Type is used to inject the OpenTelemetry agent as a sidecar into every Workload. In addition to adding the sidecar, it also adds an environment variable called OTEL_EXPORTER_OTLP_ENDPOINT to each container running in the Workload.
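As a sketch of the effect: for a Workload with a single container named main (a hypothetical container id), the outputs template below would render this update patch:

update:
- op: add
  path: /spec/containers/main/variables/OTEL_EXPORTER_OTLP_ENDPOINT
  value: http://localhost:4317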
otel-sidecar.tf (view on GitHub):
resource "humanitec_resource_definition" "otel-sidecar" {
  driver_type = "humanitec/template"
  id          = "otel-sidecar"
  name        = "otel-sidecar"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
{{- /*
The "update" output is passed into the corresponding "update" output of the "workload" Resource Type.
*/ -}}
update:
{{- /*
Add the variable OTEL_EXPORTER_OTLP_ENDPOINT to all containers
*/ -}}
{{- range $containerId, $value := .resource.spec.containers }}
- op: add
  path: /spec/containers/{{ $containerId }}/variables/OTEL_EXPORTER_OTLP_ENDPOINT
  value: http://localhost:4317
{{- end }}
END_OF_TEXT
        "manifests" = {
          "sidecar.yaml" = {
            "location" = "containers"
            "data" = <<END_OF_TEXT
{{- /*
The OpenTelemetry container as a sidecar in the workload
*/ -}}
command:
- "/otelcol"
- "--config=/conf/otel-agent-config.yaml"
image: otel/opentelemetry-collector:0.94.0
name: otel-agent
resources:
  limits:
    cpu: 500m
    memory: 500Mi
  requests:
    cpu: 100m
    memory: 100Mi
ports:
- containerPort: 55679 # ZPages endpoint.
- containerPort: 4317 # Default OpenTelemetry receiver port.
- containerPort: 8888 # Metrics.
env:
- name: GOMEMLIMIT
  value: 400MiB
volumeMounts:
- name: otel-agent-config-vol
  mountPath: /conf
END_OF_TEXT
          }
          "sidecar-volume.yaml" = {
            "location" = "volumes"
            "data" = <<END_OF_TEXT
{{- /*
A volume that is used to surface the config file
*/ -}}
configMap:
  name: otel-agent-conf-{{ .id }}
  items:
  - key: otel-agent-config
    path: otel-agent-config.yaml
name: otel-agent-config-vol
END_OF_TEXT
          }
          "otel-config-map.yaml" = {
            "location" = "namespace"
            "data" = <<END_OF_TEXT
{{- /*
The config file for the OpenTelemetry agent. Notice that its name includes the GUResID
*/ -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-agent-conf-{{ .id }}
  labels:
    app: opentelemetry
    component: otel-agent-conf
data:
  otel-agent-config: |
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: localhost:4317
          http:
            endpoint: localhost:4318
    exporters:
      otlp:
        endpoint: "otel-collector.default:4317"
        tls:
          insecure: true
        sending_queue:
          num_consumers: 4
          queue_size: 100
        retry_on_failure:
          enabled: true
    processors:
      batch:
      memory_limiter:
        # 80% of maximum memory up to 2G
        limit_mib: 400
        # 25% of limit up to 2G
        spike_limit_mib: 100
        check_interval: 5s
    extensions:
      zpages: {}
    service:
      extensions: [zpages]
      pipelines:
        traces:
          receivers: [otlp]
          processors: [memory_limiter, batch]
          exporters: [otlp]
END_OF_TEXT
          }
        }
      }
    })
  }
}
otel-sidecar.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: otel-sidecar
entity:
  name: otel-sidecar
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          {{- /*
          The "update" output is passed into the corresponding "update" output of the "workload" Resource Type.
          */ -}}
          update:
          {{- /*
          Add the variable OTEL_EXPORTER_OTLP_ENDPOINT to all containers
          */ -}}
          {{- range $containerId, $value := .resource.spec.containers }}
          - op: add
            path: /spec/containers/{{ $containerId }}/variables/OTEL_EXPORTER_OTLP_ENDPOINT
            value: http://localhost:4317
          {{- end }}
        manifests:
          sidecar.yaml:
            location: containers
            data: |
              {{- /*
              The OpenTelemetry container as a sidecar in the workload
              */ -}}
              command:
              - "/otelcol"
              - "--config=/conf/otel-agent-config.yaml"
              image: otel/opentelemetry-collector:0.94.0
              name: otel-agent
              resources:
                limits:
                  cpu: 500m
                  memory: 500Mi
                requests:
                  cpu: 100m
                  memory: 100Mi
              ports:
              - containerPort: 55679 # ZPages endpoint.
              - containerPort: 4317 # Default OpenTelemetry receiver port.
              - containerPort: 8888 # Metrics.
              env:
              - name: GOMEMLIMIT
                value: 400MiB
              volumeMounts:
              - name: otel-agent-config-vol
                mountPath: /conf
          sidecar-volume.yaml:
            location: volumes
            data: |
              {{- /*
              A volume that is used to surface the config file
              */ -}}
              configMap:
                name: otel-agent-conf-{{ .id }}
                items:
                - key: otel-agent-config
                  path: otel-agent-config.yaml
              name: otel-agent-config-vol
          otel-config-map.yaml:
            location: namespace
            data: |
              {{- /*
              The config file for the OpenTelemetry agent. Notice that its name includes the GUResID
              */ -}}
              apiVersion: v1
              kind: ConfigMap
              metadata:
                name: otel-agent-conf-{{ .id }}
                labels:
                  app: opentelemetry
                  component: otel-agent-conf
              data:
                otel-agent-config: |
                  receivers:
                    otlp:
                      protocols:
                        grpc:
                          endpoint: localhost:4317
                        http:
                          endpoint: localhost:4318
                  exporters:
                    otlp:
                      endpoint: "otel-collector.default:4317"
                      tls:
                        insecure: true
                      sending_queue:
                        num_consumers: 4
                        queue_size: 100
                      retry_on_failure:
                        enabled: true
                  processors:
                    batch:
                    memory_limiter:
                      # 80% of maximum memory up to 2G
                      limit_mib: 400
                      # 25% of limit up to 2G
                      spike_limit_mib: 100
                      check_interval: 5s
                  extensions:
                    zpages: {}
                  service:
                    extensions: [zpages]
                    pipelines:
                      traces:
                        receivers: [otlp]
                        processors: [memory_limiter, batch]
                        exporters: [otlp]
  criteria: []
Affinity
This section contains example Resource Definitions using the Template Driver for the affinity of Kubernetes Pods.
- affinity.yaml: Add affinity rules to the Workload. This format is for use with the Humanitec CLI.
affinity.tf (view on GitHub):
resource "humanitec_resource_definition" "workload-affinity" {
  driver_type = "humanitec/template"
  id          = "workload-affinity"
  name        = "workload-affinity"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/affinity
  value:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: another-node-label-key
            operator: In
            values:
            - another-node-label-value
END_OF_TEXT
      }
    })
  }
}
affinity.yaml (view on GitHub):
# Add affinity rules to the Workload by adding a value to the manifest at .spec.affinity
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: workload-affinity
entity:
  name: workload-affinity
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/affinity
            value:
              nodeAffinity:
                preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 1
                  preference:
                    matchExpressions:
                    - key: another-node-label-key
                      operator: In
                      values:
                      - another-node-label-value
  criteria: []
Annotations
This section shows how to use the Template Driver for managing annotations on Kubernetes objects.
While it is also possible to set annotations via Score, the approach shown here shifts the management of annotations down to the Platform, ensuring consistency and relieving developers of the need to repeat common annotations for each Workload in the Score extension file.
The example illustrates an annotation on the Kubernetes Service object (specific to Google Kubernetes Engine in this case). If you want to see a more generic approach that covers annotations on Workloads too, you can follow the same approach as described in the example with labels.
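As a sketch of the result: for a Score file defining a service with a single port whose service_port is 8080 (an assumed value), the final patch in the template below renders this annotation onto the Service object:

cloud.google.com/neg: '{"ingress":true,"exposed_ports":{"8080":{}}}'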
annotations.tf (view on GitHub):
resource "humanitec_resource_definition" "annotations" {
  driver_type = "humanitec/template"
  id          = "annotations"
  name        = "annotations"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/deployment/annotations
  value:
    {{- range $key, $val := .resource.spec.deployment.annotations }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
- op: add
  path: /spec/pod/annotations
  value:
    {{- range $key, $val := .resource.spec.pod.annotations }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
# If the Score file also defines a service, add annotations to the service object
{{- if .resource.spec.service }}
- op: add
  path: /spec/service/annotations
  value:
    {{- range $key, $val := .resource.spec.service.annotations }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
{{- $port := values .resource.spec.service.ports | first }}
- op: add
  path: /spec/service/annotations/cloud.google.com~1neg
  value: '{"ingress":true,"exposed_ports":{ {{- $port.service_port | quote -}} :{}}}'
{{- end }}
END_OF_TEXT
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "annotations_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.annotations.id
}
annotations.yaml (view on GitHub):
# This Resource Definition shows how to add annotations to the Kubernetes service object using the Template Driver
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: annotations
entity:
  name: annotations
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/deployment/annotations
            value:
              {{- range $key, $val := .resource.spec.deployment.annotations }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
          - op: add
            path: /spec/pod/annotations
            value:
              {{- range $key, $val := .resource.spec.pod.annotations }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
          # If the Score file also defines a service, add annotations to the service object
          {{- if .resource.spec.service }}
          - op: add
            path: /spec/service/annotations
            value:
              {{- range $key, $val := .resource.spec.service.annotations }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
          {{- $port := values .resource.spec.service.ports | first }}
          - op: add
            path: /spec/service/annotations/cloud.google.com~1neg
            value: '{"ingress":true,"exposed_ports":{ {{- $port.service_port | quote -}} :{}}}'
          {{- end }}
  criteria:
  - {}
Endpoint
This section contains example Resource Definitions using the Template Driver for creating an endpoint resource.
The endpoint resource is used to represent an endpoint that a workload needs to interact with. This could, for example, be a shared service or an external API.
Here, the Template Driver is used to provide optional outputs, making use of the default function as well as merge to construct dictionaries with default values.
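As a worked example, assume the host and port values below (example.com and 8080) plus hypothetical secrets username: user and password: pass. The init template then computes userinfo: user:pass and hostport: example.com:8080, and the secrets template merges these over the outputs before applying urlJoin, yielding secret outputs roughly like:

username: "user"
password: "pass"
url: "http://user:pass@example.com:8080"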
endpoint-def.tf (view on GitHub):
resource "humanitec_resource_definition" "endpoint-example-endpoint" {
  driver_type = "humanitec/template"
  id          = "endpoint-example-endpoint"
  name        = "endpoint-example-endpoint"
  type        = "endpoint"
  driver_inputs = {
    values_string = jsonencode({
      "host" = "example.com"
      "port" = 8080
      "templates" = {
        "init" = <<END_OF_TEXT
{{- $username := .driver.secrets.username | default "" }}
{{- $password := .driver.secrets.password | default "" }}
username: {{ $username | toRawJson }}
password: {{ $password | toRawJson }}
userinfo: {{ if $username }}
{{- $username }}:
{{- end }}
{{- $password }}
hostport: {{ .driver.values.host }}
{{- if .driver.values.port -}}
:{{ .driver.values.port }}
{{- end }}
END_OF_TEXT
        "outputs" = <<END_OF_TEXT
scheme: {{ .driver.values.scheme | default "http" }}
host: {{ .driver.values.host }}
port: {{ .driver.values.port }}
path: {{ .driver.values.path | default "" | toRawJson }}
query: {{ .driver.values.query | default "" | toRawJson }}
fragment: {{ .driver.values.fragment | default "" | toRawJson }}
END_OF_TEXT
        "secrets" = <<END_OF_TEXT
username: {{ .init.username | toRawJson }}
password: {{ .init.password | toRawJson }}
url: {{ .outputs.values | merge (dict "userinfo" (.init.userinfo | default "") "host" .init.hostport) | urlJoin | toRawJson }}
END_OF_TEXT
      }
    })
  }
}
endpoint-def.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: endpoint-example-endpoint
entity:
  driver_inputs:
    values:
      # Commented out properties are optional
      # scheme: http
      host: example.com
      port: 8080
      # path: ""
      # query: ""
      # fragment: ""
      templates:
        init: |
          {{- $username := .driver.secrets.username | default "" }}
          {{- $password := .driver.secrets.password | default "" }}
          username: {{ $username | toRawJson }}
          password: {{ $password | toRawJson }}
          userinfo: {{ if $username }}
          {{- $username }}:
          {{- end }}
          {{- $password }}
          hostport: {{ .driver.values.host }}
          {{- if .driver.values.port -}}
          :{{ .driver.values.port }}
          {{- end }}
        outputs: |
          scheme: {{ .driver.values.scheme | default "http" }}
          host: {{ .driver.values.host }}
          port: {{ .driver.values.port }}
          path: {{ .driver.values.path | default "" | toRawJson }}
          query: {{ .driver.values.query | default "" | toRawJson }}
          fragment: {{ .driver.values.fragment | default "" | toRawJson }}
        secrets: |
          username: {{ .init.username | toRawJson }}
          password: {{ .init.password | toRawJson }}
          url: {{ .outputs.values | merge (dict "userinfo" (.init.userinfo | default "") "host" .init.hostport) | urlJoin | toRawJson }}
    # Both username and password are optional
    # If supplied, they should be secret references to keys in the
    # secret manager configured for the Humanitec Operator
    # secret_refs:
    #   username:
    #     store:
    #     ref:
    #   password:
    #     store:
    #     ref:
  driver_type: humanitec/template
  type: endpoint
  name: endpoint-example-endpoint
Horizontal pod autoscaler
This section contains a Resource Definition example for handling Kubernetes HorizontalPodAutoscalers by using the template Driver to configure your own HorizontalPodAutoscaler implementation. You can see this other example if you want to use the hpa Driver.
You can find a Score file example using the horizontal-pod-autoscaler resource type here.
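As a sketch of how the init template evaluates: for a hypothetical Workload my-workload (so ${context.res.id} resolves to modules.my-workload) and a Score file requesting maxReplicas: 20, the computed init values would be:

workload: my-workload                 # element 1 after splitting "modules.my-workload" on "."
maxReplicas: 10                       # requested 20, capped at $absoluteMaxReplicas by "min"
minReplicas: 2                        # default applied
targetCPUUtilizationPercentage: 80    # default applied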
hpa.tf (view on GitHub):
resource "humanitec_resource_definition" "hpa" {
  driver_type = "humanitec/template"
  id          = "hpa"
  name        = "hpa"
  type        = "horizontal-pod-autoscaler"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = <<END_OF_TEXT
{{ $defaultMaxReplicas := 3 }}
{{ $defaultMinReplicas := 2 }}
{{ $absoluteMaxReplicas := 10 }}
{{ $defaultTargetUtilizationPercent := 80 }}
workload: {{ index (splitList "." "$${context.res.id}") 1 }}
maxReplicas: {{ .resource.maxReplicas | default $defaultMaxReplicas | min $absoluteMaxReplicas }}
minReplicas: {{ .resource.minReplicas | default $defaultMinReplicas }}
targetCPUUtilizationPercentage: {{ .resource.targetCPUUtilizationPercentage | default $defaultTargetUtilizationPercent }}
END_OF_TEXT
        "manifests" = <<END_OF_TEXT
hpa.yaml:
  location: namespace
  data:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: {{ .init.workload }}-hpa
    spec:
      maxReplicas: {{ .init.maxReplicas }}
      metrics:
      - resource:
          name: cpu
          target:
            averageUtilization: {{ .init.targetCPUUtilizationPercentage }}
            type: Utilization
        type: Resource
      minReplicas: {{ .init.minReplicas }}
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: {{ .init.workload }}
END_OF_TEXT
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "hpa_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.hpa.id
}
hpa.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: hpa
entity:
  driver_type: humanitec/template
  name: hpa
  type: horizontal-pod-autoscaler
  driver_inputs:
    values:
      templates:
        init: |
          {{ $defaultMaxReplicas := 3 }}
          {{ $defaultMinReplicas := 2 }}
          {{ $absoluteMaxReplicas := 10 }}
          {{ $defaultTargetUtilizationPercent := 80 }}
          workload: {{ index (splitList "." "${context.res.id}") 1 }}
          maxReplicas: {{ .resource.maxReplicas | default $defaultMaxReplicas | min $absoluteMaxReplicas }}
          minReplicas: {{ .resource.minReplicas | default $defaultMinReplicas }}
          targetCPUUtilizationPercentage: {{ .resource.targetCPUUtilizationPercentage | default $defaultTargetUtilizationPercent }}
        manifests: |
          hpa.yaml:
            location: namespace
            data:
              apiVersion: autoscaling/v2
              kind: HorizontalPodAutoscaler
              metadata:
                name: {{ .init.workload }}-hpa
              spec:
                maxReplicas: {{ .init.maxReplicas }}
                metrics:
                - resource:
                    name: cpu
                    target:
                      averageUtilization: {{ .init.targetCPUUtilizationPercentage }}
                      type: Utilization
                  type: Resource
                minReplicas: {{ .init.minReplicas }}
                scaleTargetRef:
                  apiVersion: apps/v1
                  kind: Deployment
                  name: {{ .init.workload }}
  criteria:
  - {}
Imagepullsecrets
This section shows how to use the Template Driver for configuring cluster access to a private container image registry.
The example implements the Kubernetes standard mechanism to Pull an Image from a Private Registry. It creates a Kubernetes Secret of kubernetes.io/dockerconfigjson type, reading the credentials from a secret store. It then configures the secret as the imagePullSecret for a Workload's Pod.
The example is applicable only when using the Humanitec Operator on the cluster. With the Operator, using the Registries feature of the Platform Orchestrator is not supported.
To use this mechanism, install the Resource Definitions of this example into your Organization, replacing some placeholder values with the actual values of your setup. Add the appropriate matching criteria to the workload Definition to match the Workloads you want to have access to the private registry.
Note: workload is an implicit Resource Type, so it is automatically referenced for every Deployment.
- config.yaml: Resource Definition of type: config that reads the credentials for the private registry from a secret store and creates the Kubernetes Secret
- workload.yaml: Resource Definition of type: workload that adds the imagePullSecrets element to the Pod spec, referencing the config Resource
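As a sketch under assumed values (server registry.example.com, username user, password pass), the init template renders this dockerConfigJson structure, which is then JSON-encoded, base64-encoded, and placed into the Secret's .dockerconfigjson field:

dockerConfigJson:
  auths:
    "registry.example.com":
      username: "user"
      password: "pass"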
config.tf (view on GitHub):
resource "humanitec_resource_definition" "regcred-config" {
  driver_type = "humanitec/template"
  id          = "regcred-config"
  name        = "regcred-config"
  type        = "config"
  driver_inputs = {
    values_string = jsonencode({
      "secret_name" = "regcred"
      "server"      = "FIXME"
      "templates" = {
        "init" = <<END_OF_TEXT
dockerConfigJson:
  auths:
    {{ .driver.values.server | quote }}:
      username: {{ .driver.secrets.username | toRawJson }}
      password: {{ .driver.secrets.password | toRawJson }}
END_OF_TEXT
        "manifests" = {
          "regcred-secret.yaml" = {
            "data" = <<END_OF_TEXT
apiVersion: v1
kind: Secret
metadata:
  name: {{ .driver.values.secret_name }}
data:
  .dockerconfigjson: {{ .init.dockerConfigJson | toRawJson | b64enc }}
type: kubernetes.io/dockerconfigjson
END_OF_TEXT
            "location" = "namespace"
          }
        }
        "outputs" = "secret_name: {{ .driver.values.secret_name }}"
      }
    })
    secret_refs = jsonencode({
      "password" = {
        "ref"   = "regcred-password"
        "store" = "FIXME"
      }
      "username" = {
        "ref"   = "regcred-username"
        "store" = "FIXME"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "regcred-config_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.regcred-config.id
  class                  = "default"
  res_id                 = "regcred"
}
config.yaml (view on GitHub):
# This Resource Definition pulls credentials for a container image registry from a secret store
# and creates a Kubernetes Secret of kubernetes.io/dockerconfigjson type
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: regcred-config
entity:
  driver_type: humanitec/template
  name: regcred-config
  type: config
  criteria:
  - class: default
    # This res_id must be used from a referencing Resource Definition to request this config Resource
    res_id: regcred
  driver_inputs:
    # These secret references read the credentials from a secret store
    secret_refs:
      password:
        ref: regcred-password
        # Replace this value with the secret store id that's supplying the password
        store: FIXME
      username:
        ref: regcred-username
        # Replace this value with the secret store id that's supplying the username
        store: FIXME
    values:
      secret_name: regcred
      # Replace this value with the servername of your registry
      server: FIXME
      templates:
        # The init template is used to prepare the "dockerConfigJson" content
        init: |
          dockerConfigJson:
            auths:
              {{ .driver.values.server | quote }}:
                username: {{ .driver.secrets.username | toRawJson }}
                password: {{ .driver.secrets.password | toRawJson }}
        manifests:
          # The manifests template creates the Kubernetes Secret
          # which can then be used in the workload "imagePullSecrets"
          regcred-secret.yaml:
            data: |
              apiVersion: v1
              kind: Secret
              metadata:
                name: {{ .driver.values.secret_name }}
              data:
                .dockerconfigjson: {{ .init.dockerConfigJson | toRawJson | b64enc }}
              type: kubernetes.io/dockerconfigjson
            location: namespace
        outputs: |
          secret_name: {{ .driver.values.secret_name }}
workload.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-workload" {
  driver_type = "humanitec/template"
  id          = "custom-workload"
  name        = "custom-workload"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/imagePullSecrets
  value:
  - name: $${resources['config.default#regcred'].outputs.secret_name}
END_OF_TEXT
      }
    })
  }
}
workload.yaml (view on GitHub):
# This workload Resource Definition adds an "imagePullSecrets" element to the Pod spec
# It references a "config" type Resource to obtain the secret name
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-workload
entity:
  name: custom-workload
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/imagePullSecrets
            value:
            - name: ${resources['config.default#regcred'].outputs.secret_name}
Ingress
This section contains example Resource Definitions for handling Kubernetes ingress traffic. Instead of the Ingress Driver type, we are using the Template Driver type, which allows us to render any Kubernetes YAML object.
- ingress-traefik.yaml: defines an IngressRoute object for the Traefik Ingress Controller using the IngressRoute custom resource definition. This format is for use with the Humanitec CLI.
- ingress-traefik-multiple-routes.yaml: defines an IngressRoute object for the Traefik Ingress Controller using the IngressRoute custom resource definition. It dynamically extracts the routes from the route resources in the Resource Graph to provide multiple routes. This format is for use with the Humanitec CLI.
- ingress-ambassador.yaml: defines a Mapping object for the Ambassador Ingress Controller using the Mapping custom resource definition. This format is for use with the Humanitec CLI.
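To illustrate the multiple-routes variant: assuming a single route resource with host app.example.com, path /api, port 8080, and service my-service (all hypothetical values), the four route* inputs each resolve to a one-element array and the manifests template renders a single rule:

- match: Host(`app.example.com`) && PathPrefix(`/api`)
  kind: Rule
  services:
  - kind: Service
    name: "my-service"
    port: 8080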
ingress-ambassador.tf (view on GitHub):
resource "humanitec_resource_definition" "ambassador-ingress" {
  driver_type = "template"
  id          = "ambassador-ingress"
  name        = "ambassador-ingress"
  type        = "ingress"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = <<END_OF_TEXT
name: {{ .id }}-ingress
secretname: $${resources.tls-cert.outputs.tls_secret_name}
host: $${resources.dns.outputs.host}
namespace: $${resources['k8s-namespace#k8s-namespace'].outputs.namespace}
END_OF_TEXT
        "manifests" = <<END_OF_TEXT
ambassador-mapping.yaml:
  data:
    apiVersion: getambassador.io/v3alpha1
    kind: Mapping
    metadata:
      name: {{ .init.name }}-mapping
    spec:
      host: {{ .init.host }}
      prefix: /
      service: my-service-name:8080
  location: namespace
ambassador-tlscontext.yaml:
  data:
    apiVersion: getambassador.io/v3alpha1
    kind: TLSContext
    metadata:
      name: {{ .init.name }}-tlscontext
    spec:
      hosts:
      - {{ .init.host }}
      secret: {{ .init.secretname }}
  location: namespace
END_OF_TEXT
      }
    })
  }
}
ingress-ambassador.yaml (view on GitHub):
# This Resource Definition provisions Mapping and TLSContext objects for the Ambassador Ingress Controller
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: ambassador-ingress
entity:
  name: ambassador-ingress
  type: ingress
  driver_type: template
  driver_inputs:
    values:
      templates:
        init: |
          name: {{ .id }}-ingress
          secretname: ${resources.tls-cert.outputs.tls_secret_name}
          host: ${resources.dns.outputs.host}
          namespace: ${resources['k8s-namespace#k8s-namespace'].outputs.namespace}
        manifests: |
          ambassador-mapping.yaml:
            data:
              apiVersion: getambassador.io/v3alpha1
              kind: Mapping
              metadata:
                name: {{ .init.name }}-mapping
              spec:
                host: {{ .init.host }}
                prefix: /
                service: my-service-name:8080
            location: namespace
          ambassador-tlscontext.yaml:
            data:
              apiVersion: getambassador.io/v3alpha1
              kind: TLSContext
              metadata:
                name: {{ .init.name }}-tlscontext
              spec:
                hosts:
                - {{ .init.host }}
                secret: {{ .init.secretname }}
            location: namespace
ingress-traefik-multiple-routes.tf (view on GitHub):
resource "humanitec_resource_definition" "traefik-ingress-eg" {
  driver_type = "humanitec/template"
  id          = "traefik-ingress-eg"
  name        = "traefik-ingress-eg"
  type        = "ingress"
  driver_inputs = {
    values_string = jsonencode({
      "routeHosts"    = "$${resources.dns<route.outputs.host}"
      "routePaths"    = "$${resources.dns<route.outputs.path}"
      "routePorts"    = "$${resources.dns<route.outputs.port}"
      "routeServices" = "$${resources.dns<route.outputs.service}"
      "templates" = {
        "init" = <<END_OF_TEXT
host: {{ .resource.host | quote }}
# ingress paths are added implicitly to our ingress resource based on the contents of our workload. These are an older
# alternative to route resources. Consider this deprecated in the future!
ingressPaths: {{ dig "rules" "http" (list) .resource | toRawJson }}
# The tls secret name could be generated by Humanitec or injected as an input parameter to our ingress.
tlsSecretName: {{ .driver.values.tls_secret_name | default .resource.tls_secret_name | default .driver.values.automatic_tls_secret_name | quote }}
{{- if eq (lower ( .driver.values.path_type | default "Prefix")) "exact" -}}
defaultMatchRule: Path
{{- else }}
defaultMatchRule: PathPrefix
{{- end }}
END_OF_TEXT
        "manifests" = <<END_OF_TEXT
# Create our single manifest with many routes in it. Alternative configurations could create a manifest per route with unique file names if required.
ingressroute.yaml:
  location: namespace
  data:
    apiVersion: traefik.io/v1alpha1
    kind: IngressRoute
    metadata:
      # id is the unique resource uuid for this ingress
      name: {{ .id }}-ingressroute
      annotations:
        {{- range $k, $v := .driver.values.annotations }}
        {{ $k | toRawJson }}: {{ $v | toRawJson }}
        {{- end }}
      labels:
        {{- range $k, $v := .driver.values.labels }}
        {{ $k | toRawJson }}: {{ $v | toRawJson }}
        {{- end }}
    spec:
      entryPoints:
      - websecure
      routes:
      # Add all the paths from the dependent route resources. Route resources can have different hostnames but will all obey the path type set out in the resource inputs.
      {{- range $index, $path := .driver.values.routePaths }}
      - match: Host(`{{ index $.driver.values.routeHosts $index }}`) && {{ $.init.defaultMatchRule }}(`{{ $path }}`)
        kind: Rule
        services:
        - kind: Service
          name: {{ index $.driver.values.routeServices $index | toRawJson }}
          port: {{ index $.driver.values.routePorts $index }}
      {{- end }}
      # Add all the support ingress paths. The old style ingress rules use a single hostname coming from the resource configuration but support different path types per rule.
      # As mentioned further up, consider these deprecated in the future!
      {{- range $path, $rule := .init.ingressPaths }}
      {{ $lcType := lower $rule.type -}}
      {{- if eq $lcType "implementationspecific" -}}
      - match: Host(`{{ $.init.host }}`) && Path(`{{ $path }}`)
      {{- else if eq $lcType "exact" -}}
      - match: Host(`{{ $.init.host }}`) && Path(`{{ $path }}`)
      {{ else }}
      - match: Host(`{{ $.init.host }}`) && PathPrefix(`{{ $path }}`)
      {{- end }}
        kind: Rule
        services:
        - kind: Service
          name: {{ $rule.name | quote }}
          port: {{ $rule.port }}
      {{- end }}
      {{- if not (or .driver.values.no_tls (eq .init.tlsSecretName "")) }}
      tls:
        secretName: {{ .init.tlsSecretName | toRawJson }}
      {{- end }}
END_OF_TEXT
      }
    })
  }
}
ingress-traefik-multiple-routes.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: traefik-ingress-eg
entity:
  name: traefik-ingress-eg
  driver_type: humanitec/template
  type: ingress
  driver_inputs:
    values:
      # Find all the route resources that are dependent on any dns resources used in this workload.
      # We extract arrays of their host, path, port, and service resource.
      # These will become new entries in the .driver.values table.
      routeHosts: ${resources.dns<route.outputs.host}
      routePaths: ${resources.dns<route.outputs.path}
      routePorts: ${resources.dns<route.outputs.port}
      routeServices: ${resources.dns<route.outputs.service}
      templates:
        # The init template gives us a place to precompute some fields that we'll use in the manifests template.
        init: |
          host: {{ .resource.host | quote }}
          # ingress paths are added implicitly to our ingress resource based on the contents of our workload. These are an older
          # alternative to route resources. Consider this deprecated in the future!
          ingressPaths: {{ dig "rules" "http" (list) .resource | toRawJson }}
          # The tls secret name could be generated by Humanitec or injected as an input parameter to our ingress.
          tlsSecretName: {{ .driver.values.tls_secret_name | default .resource.tls_secret_name | default .driver.values.automatic_tls_secret_name | quote }}
          {{- if eq (lower ( .driver.values.path_type | default "Prefix")) "exact" -}}
          defaultMatchRule: Path
          {{- else }}
          defaultMatchRule: PathPrefix
          {{- end }}
        manifests: |
          # Create our single manifest with many routes in it. Alternative configurations could create a manifest per route with unique file names if required.
          ingressroute.yaml:
            location: namespace
            data:
              apiVersion: traefik.io/v1alpha1
              kind: IngressRoute
              metadata:
                # id is the unique resource uuid for this ingress
                name: {{ .id }}-ingressroute
                annotations:
                  {{- range $k, $v := .driver.values.annotations }}
                  {{ $k | toRawJson }}: {{ $v | toRawJson }}
                  {{- end }}
                labels:
                  {{- range $k, $v := .driver.values.labels }}
                  {{ $k | toRawJson }}: {{ $v | toRawJson }}
                  {{- end }}
              spec:
                entryPoints:
                - websecure
                routes:
                # Add all the paths from the dependent route resources. Route resources can have different hostnames but will all obey the path type set out in the resource inputs.
                {{- range $index, $path := .driver.values.routePaths }}
                - match: Host(`{{ index $.driver.values.routeHosts $index }}`) && {{ $.init.defaultMatchRule }}(`{{ $path }}`)
                  kind: Rule
                  services:
                  - kind: Service
                    name: {{ index $.driver.values.routeServices $index | toRawJson }}
                    port: {{ index $.driver.values.routePorts $index }}
                {{- end }}
                # Add all the support ingress paths. The old style ingress rules use a single hostname coming from the resource configuration but support different path types per rule.
                # As mentioned further up, consider these deprecated in the future!
                {{- range $path, $rule := .init.ingressPaths }}
                {{ $lcType := lower $rule.type -}}
                {{- if eq $lcType "implementationspecific" -}}
                - match: Host(`{{ $.init.host }}`) && Path(`{{ $path }}`)
                {{- else if eq $lcType "exact" -}}
                - match: Host(`{{ $.init.host }}`) && Path(`{{ $path }}`)
                {{ else }}
                - match: Host(`{{ $.init.host }}`) && PathPrefix(`{{ $path }}`)
                {{- end }}
                  kind: Rule
                  services:
                  - kind: Service
                    name: {{ $rule.name | quote }}
                    port: {{ $rule.port }}
                {{- end }}
                {{- if not (or .driver.values.no_tls (eq .init.tlsSecretName "")) }}
                tls:
                  secretName: {{ .init.tlsSecretName | toRawJson }}
                {{- end }}
ingress-traefik.tf (view on GitHub):
resource "humanitec_resource_definition" "traefik-ingress" {
  driver_type = "template"
  id          = "traefik-ingress"
  name        = "traefik-ingress"
  type        = "ingress"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = <<END_OF_TEXT
name: {{ .id }}-ir
secretname: $${resources.tls-cert.outputs.tls_secret_name}
host: $${resources.dns.outputs.host}
namespace: $${resources['k8s-namespace#k8s-namespace'].outputs.namespace}
END_OF_TEXT
        "manifests" = <<END_OF_TEXT
traefik-ingressroute.yaml:
  data:
    apiVersion: traefik.io/v1alpha1
    kind: IngressRoute
    metadata:
      name: {{ .init.name }}
    spec:
      routes:
      - match: Host(`{{ .init.host }}`) && PathPrefix(`/`)
        kind: Rule
        services:
        - name: my-service-name
          kind: Service
          port: 8080
          namespace: {{ .init.namespace }}
      tls:
        secretName: {{ .init.secretname }}
  location: namespace
END_OF_TEXT
      }
    })
  }
}
ingress-traefik.yaml (view on GitHub):
# This Resource Definition provisions an IngressRoute object for the Traefik Ingress Controller
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: traefik-ingress
entity:
  name: traefik-ingress
  type: ingress
  driver_type: template
  driver_inputs:
    values:
      templates:
        init: |
          name: {{ .id }}-ir
          secretname: ${resources.tls-cert.outputs.tls_secret_name}
          host: ${resources.dns.outputs.host}
          namespace: ${resources['k8s-namespace#k8s-namespace'].outputs.namespace}
        manifests: |
          traefik-ingressroute.yaml:
            data:
              apiVersion: traefik.io/v1alpha1
              kind: IngressRoute
              metadata:
                name: {{ .init.name }}
              spec:
                routes:
                - match: Host(`{{ .init.host }}`) && PathPrefix(`/`)
                  kind: Rule
                  services:
                  - name: my-service-name
                    kind: Service
                    port: 8080
                    namespace: {{ .init.namespace }}
                tls:
                  secretName: {{ .init.secretname }}
            location: namespace
Labels
This section shows how to use the Template Driver for managing labels on Kubernetes objects.
While it is also possible to set labels via Score, the approach shown here shifts the management of labels down to the Platform, ensuring consistency and relieving developers of the need to repeat common labels for each Workload in the Score extension file.
- config-labels.yaml: Resource Definition of type config which defines the value for a sample label at a central place.
- custom-workload-with-dynamic-labels.yaml: Add dynamic labels to your Workload. This format is for use with the Humanitec CLI.
- custom-namespace-with-dynamic-labels.yaml: Add dynamic labels to your Namespace. This format is for use with the Humanitec CLI.
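As a sketch of the rendered result: assuming an Environment with id development (a hypothetical value), the Deployment object of a Workload would receive these labels in addition to any labels set via Score:

env_id: development
cost_center_id: my-example-id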
config-labels.tf (view on GitHub):
resource "humanitec_resource_definition" "app-config" {
  driver_type = "humanitec/template"
  id          = "app-config"
  name        = "app-config"
  type        = "config"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = "cost_center_id: my-example-id\n"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "app-config_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.app-config.id
  res_id                 = "app-config"
}
config-labels.yaml (view on GitHub):
# This "config" type Resource Definition provides the value for the sample label
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: app-config
entity:
  name: app-config
  type: config
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        # Returns a sample output named "cost_center_id" to be used as a label
        outputs: |
          cost_center_id: my-example-id
  # Match the resource ID "app-config" so that it can be requested via that ID
  criteria:
  - res_id: app-config
custom-namespace-with-dynamic-labels.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-namespace-with-label" {
  driver_type = "humanitec/template"
  id          = "custom-namespace-with-label"
  name        = "custom-namespace-with-label"
  type        = "k8s-namespace"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = "name: $${context.app.id}-$${context.env.id}\n"
        "manifests" = <<END_OF_TEXT
namespace.yaml:
  location: cluster
  data:
    apiVersion: v1
    kind: Namespace
    metadata:
      labels:
        env_id: $${context.env.id}
        cost_center_id: $${resources['config.default#app-config'].outputs.cost_center_id}
      name: {{ .init.name }}
END_OF_TEXT
        "outputs" = "namespace: {{ .init.name }}\n"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "custom-namespace-with-label_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.custom-namespace-with-label.id
}
custom-namespace-with-dynamic-labels.yaml (view on GitHub):
# This Resource Definition references the "config" resource to use its output as a label
# and adds another label taken from the Deployment context
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-namespace-with-label
entity:
  name: custom-namespace-with-label
  type: k8s-namespace
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        init: |
          name: ${context.app.id}-${context.env.id}
        manifests: |
          namespace.yaml:
            location: cluster
            data:
              apiVersion: v1
              kind: Namespace
              metadata:
                labels:
                  env_id: ${context.env.id}
                  cost_center_id: ${resources['config.default#app-config'].outputs.cost_center_id}
                name: {{ .init.name }}
        outputs: |
          namespace: {{ .init.name }}
  # Set matching criteria as required
  criteria:
  - {}
custom-workload-with-dynamic-labels.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-workload-with-label" {
  driver_type = "humanitec/template"
  id          = "custom-workload-with-label"
  name        = "custom-workload-with-label"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/deployment/labels
  value:
    {{- range $key, $val := .resource.spec.deployment.labels }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
    env_id: $${context.env.id}
    cost_center_id: $${resources['config.default#app-config'].outputs.cost_center_id}
- op: add
  path: /spec/pod/labels
  value:
    {{- range $key, $val := .resource.spec.pod.labels }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
# If the Score file also defines a service, add labels to the service object
{{- if .resource.spec.service }}
- op: add
  path: /spec/service/labels
  value:
    {{- range $key, $val := .resource.spec.service.labels }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
    env_id: $${context.env.id}
    cost_center_id: $${resources['config.default#app-config'].outputs.cost_center_id}
{{- end }}
END_OF_TEXT
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "custom-workload-with-label_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.custom-workload-with-label.id
}
custom-workload-with-dynamic-labels.yaml (view on GitHub):
# This Resource Definition references the "config" resource to use its output as a label
# and adds another label taken from the Deployment context
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-workload-with-label
entity:
  name: custom-workload-with-label
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/deployment/labels
            value:
              {{- range $key, $val := .resource.spec.deployment.labels }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
              env_id: ${context.env.id}
              cost_center_id: ${resources['config.default#app-config'].outputs.cost_center_id}
          - op: add
            path: /spec/pod/labels
            value:
              {{- range $key, $val := .resource.spec.pod.labels }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
          # If the Score file also defines a service, add labels to the service object
          {{- if .resource.spec.service }}
          - op: add
            path: /spec/service/labels
            value:
              {{- range $key, $val := .resource.spec.service.labels }}
              {{ $key }}: {{ $val | quote }}
              {{- end }}
              env_id: ${context.env.id}
              cost_center_id: ${resources['config.default#app-config'].outputs.cost_center_id}
          {{- end }}
  # Set matching criteria as required
  criteria:
  - {}
Namespace
This section contains example Resource Definitions using the Template Driver for managing Kubernetes namespaces.
- custom-namespace.yaml: Create Kubernetes namespaces with your own custom naming scheme. This format is for use with the Humanitec CLI.
- custom-namespace.tf: Create Kubernetes namespaces with your own custom naming scheme. This format is for use with the Humanitec Terraform provider.
- short-namespace.yaml: Create Kubernetes namespaces with your own custom naming scheme of defined length. This format is for use with the Humanitec CLI.
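As a worked example of the shortened scheme: for a hypothetical Application my-application deployed to the Environment development, each id is cut to 8 characters by trunc, so the short-namespace init template yields a namespace name of at most 17 characters:

name: developm-my-appli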
custom-namespace.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-namespace" {
  driver_type = "humanitec/template"
  id          = "custom-namespace"
  name        = "custom-namespace2"
  type        = "k8s-namespace"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = "name: $${context.env.id}-$${context.app.id}\n"
        "manifests" = <<END_OF_TEXT
namespace.yaml:
  location: cluster
  data:
    apiVersion: v1
    kind: Namespace
    metadata:
      labels:
        pod-security.kubernetes.io/enforce: restricted
      name: {{ .init.name }}
END_OF_TEXT
        "outputs" = "namespace: {{ .init.name }}\n"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "custom-namespace_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.custom-namespace.id
}
custom-namespace.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-namespace
entity:
  name: custom-namespace2
  type: k8s-namespace
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        # Use any combination of placeholders and characters to configure your naming scheme
        init: |
          name: ${context.env.id}-${context.app.id}
        manifests: |
          namespace.yaml:
            location: cluster
            data:
              apiVersion: v1
              kind: Namespace
              metadata:
                labels:
                  pod-security.kubernetes.io/enforce: restricted
                name: {{ .init.name }}
        outputs: |
          namespace: {{ .init.name }}
  criteria:
  - {}
short-namespace.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-namespace" {
  driver_type = "humanitec/template"
  id          = "custom-namespace"
  name        = "custom-namespace"
  type        = "k8s-namespace"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "init" = "name: {{ trunc 8 \"$${context.env.id}\" }}-{{ trunc 8 \"$${context.app.id}\" }}\n"
        "manifests" = <<END_OF_TEXT
namespace.yaml:
  location: cluster
  data:
    apiVersion: v1
    kind: Namespace
    metadata:
      labels:
        pod-security.kubernetes.io/enforce: restricted
      name: {{ .init.name }}
END_OF_TEXT
        "outputs" = "namespace: {{ .init.name }}\n"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "custom-namespace_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.custom-namespace.id
}
short-namespace.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-namespace
entity:
  name: custom-namespace
  type: k8s-namespace
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        # Here the namespace name is shortened to be a maximum of 17 characters,
        # no matter how long the app and env name might be.
        init: |
          name: {{ trunc 8 "${context.env.id}" }}-{{ trunc 8 "${context.app.id}" }}
        manifests: |
          namespace.yaml:
            location: cluster
            data:
              apiVersion: v1
              kind: Namespace
              metadata:
                labels:
                  pod-security.kubernetes.io/enforce: restricted
                name: {{ .init.name }}
        outputs: |
          namespace: {{ .init.name }}
  criteria:
  - {}
Namespaced resources
This example shows a sample usage of the base-env Resource Type. It is one of the implicit Resource Types that always gets provisioned for a Deployment.
In this example, it will be used to provision multiple Kubernetes resources scoped to the Namespace: ResourceQuota and NetworkPolicies:
- The Resource Definition base-env-resource-quota.yaml uses the template Driver to provision a Kubernetes manifest describing a ResourceQuota in the target namespace.
- The Resource Definition base-env-network-policies.yaml uses the template Driver to provision a Kubernetes manifest describing a NetworkPolicy in the target namespace.
Splitting the provisioning of the two Kubernetes resources into two different Resource Definitions allows you to:
- Keep modularity: the same base-env-resource-quota (or base-env-network-policies) Resource Definition can be used by different base-env-default Definitions.
- Allow flexibility: every base-env can use a different Resource Driver (e.g. template, terraform).
The base-env-default.yaml Resource Definition creates a dependency on the other two base-env Resource Definitions using a Resource Reference. The reference specifies different Resource IDs (resource-quota and network-policies) so that the proper base-env Resource Definitions will be matched based on their matching criteria.
Three base-env Resource Definitions are provided:
- base-env-default.yaml to add the base-env Resources that provision the Kubernetes manifests to the Resource Graph
- base-env-resource-quota.yaml will be matched for all references of res_id: resource-quota
- base-env-network-policies.yaml will be matched for all references of res_id: network-policies
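The dependency is expressed through Resource Reference placeholders. Their general shape, here using the guresid attribute (the globally unique resource id) as the referenced output, looks like this (illustrative):

${resources["<type>.<class>#<res_id>"].guresid}
# e.g. the reference matched by base-env-resource-quota:
${resources["base-env.default#resource-quota"].guresid}

Referencing a Resource in this way is enough to add it to the Resource Graph and have it provisioned alongside the Workload.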
base-env-default.tf (view on GitHub):
resource "humanitec_resource_definition" "base-env-default" {
  driver_type = "humanitec/echo"
  id          = "base-env-default"
  name        = "base-env-default"
  type        = "base-env"
  driver_inputs = {
    values_string = jsonencode({
      "namespaced-resources" = {
        "resource-quota"   = "$${resources[\"base-env.default#resource-quota\"].guresid}"
        "network-policies" = "$${resources[\"base-env.default#network-policies\"].guresid}"
      }
    })
  }
}
base-env-default.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: base-env-default
entity:
  name: base-env-default
  type: base-env
  driver_type: humanitec/echo
  driver_inputs:
    values:
      namespaced-resources:
        resource-quota: ${resources["base-env.default#resource-quota"].guresid}
        network-policies: ${resources["base-env.default#network-policies"].guresid}
base-env-network-policies.tf (view on GitHub):
resource "humanitec_resource_definition" "base-env-network-policies" {
  driver_type = "humanitec/template"
  id          = "base-env-network-policies"
  name        = "base-env-network-policies"
  type        = "base-env"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "manifests" = "network-policies.yaml:\n  location: namespace\n  data:\n    apiVersion: networking.k8s.io/v1\n    kind: NetworkPolicy\n    metadata:\n      name: default-deny-egress\n    spec:\n      podSelector: {}\n      policyTypes:\n      - Egress"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "base-env-network-policies_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.base-env-network-policies.id
  res_id                 = "network-policies"
}
base-env-network-policies.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: base-env-network-policies
entity:
  name: base-env-network-policies
  type: base-env
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests: |-
          network-policies.yaml:
            location: namespace
            data:
              apiVersion: networking.k8s.io/v1
              kind: NetworkPolicy
              metadata:
                name: default-deny-egress
              spec:
                podSelector: {}
                policyTypes:
                - Egress
  criteria:
  - res_id: network-policies
base-env-resource-quota.tf (view on GitHub):
resource "humanitec_resource_definition" "base-env-resource-quota" {
  driver_type = "humanitec/template"
  id          = "base-env-resource-quota"
  name        = "base-env-resource-quota"
  type        = "base-env"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "manifests" = "quota.yaml:\n  location: namespace\n  data:\n    apiVersion: v1\n    kind: ResourceQuota\n    metadata:\n      name: compute-resources\n    spec:\n      hard:\n        limits.cpu: 1\n        limits.memory: 256Mi"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "base-env-resource-quota_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.base-env-resource-quota.id
  res_id                 = "resource-quota"
}
base-env-resource-quota.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: base-env-resource-quota
entity:
  name: base-env-resource-quota
  type: base-env
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests: |-
          quota.yaml:
            location: namespace
            data:
              apiVersion: v1
              kind: ResourceQuota
              metadata:
                name: compute-resources
              spec:
                hard:
                  limits.cpu: 1
                  limits.memory: 256Mi
  criteria:
  - res_id: resource-quota
Node selector
This section contains example Resource Definitions using the Template Driver for setting nodeSelectors on your Pods.
- aci-workload.yaml: Add the required node selector and tolerations to the Workload so it can be scheduled on an Azure AKS virtual node. This format is for use with the Humanitec CLI.
aci-workload.tf (view on GitHub):
resource "humanitec_resource_definition" "aci-workload" {
  driver_type = "humanitec/template"
  id          = "aci-workload"
  name        = "aci-workload"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/tolerations
  value:
  - key: "virtual-kubelet.io/provider"
    operator: "Exists"
  - key: "azure.com/aci"
    effect: "NoSchedule"
- op: add
  path: /spec/nodeSelector
  value:
    kubernetes.io/role: agent
    beta.kubernetes.io/os: linux
    type: virtual-kubelet
END_OF_TEXT
      }
    })
  }
}
aci-workload.yaml (view on GitHub):
# Add tolerations and nodeSelector to the Workload to make it runnable on AKS virtual nodes
# served through Azure Container Instances (ACI).
# See https://learn.microsoft.com/en-us/azure/aks/virtual-nodes-cli
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: aci-workload
entity:
  name: aci-workload
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/tolerations
            value:
            - key: "virtual-kubelet.io/provider"
              operator: "Exists"
            - key: "azure.com/aci"
              effect: "NoSchedule"
          - op: add
            path: /spec/nodeSelector
            value:
              kubernetes.io/role: agent
              beta.kubernetes.io/os: linux
              type: virtual-kubelet
  criteria: []
Resourcequota
This example shows a sample usage of the base-env Resource Type. It is one of the implicit Resource Types that always gets provisioned for a Deployment. The Resource Definition base-env-resourcequota.yaml uses it to provision a Kubernetes manifest describing a ResourceQuota in the target namespace.
The base-env Resource Definition reads the configuration values from another Resource of type config using a Resource Reference. The reference specifies a Resource ID (config#quota) so that the proper config Resource Definition will be matched based on its matching criteria.
Two config Resource Definitions are provided:
- config-quota.yaml will be matched for all references of res_id: quota
- config-quota-override.yaml will additionally be matched for a particular app_id: my-app only, effectively providing an override for the configuration values for this particular Application id
The Resource Graphs for two Applications, one of which matches the "override" criteria, will look like this:
flowchart LR
  subgraph app2[Resource Graph "my-app"]
    direction LR
    workload2[Workload] --> baseEnv2(type: base-env\nid: base-env) --> config2("type: config\nid: quota")
  end
  subgraph app1[Resource Graph "some-app"]
    direction LR
    workload1[Workload] --> baseEnv1(type: base-env\nid: base-env) --> config1("type: config\nid: quota")
  end
  resDefBaseEnv[base-env\nResource Definition]
  resDefBaseEnv -.-> baseEnv1
  resDefBaseEnv -.-> baseEnv2
  resDefQuotaConfig[config-quota\nResource Definition] -.->|criteria:\n- res_id: quota| config1
  resDefQuotaConfigOverride[config-quota-override\nResource Definition] -.->|criteria:\n- res_id: quota\n  app_id: my-app| config2
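As a sketch of the resolution: for the Application my-app, the override Definition matches, so the rendered ResourceQuota uses its values; any other Application falls back to config-quota:

spec:
  hard:
    limits.cpu: 750m      # 500m for any other Application
    limits.memory: 750Mi  # 500Mi for any other Application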
base-env-resourcequota.tf (view on GitHub):
resource "humanitec_resource_definition" "base-env" {
  driver_type = "humanitec/template"
  id          = "base-env"
  name        = "base-env"
  type        = "base-env"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "manifests" = "quota.yaml:\n  location: namespace\n  data:\n    apiVersion: v1\n    kind: ResourceQuota\n    metadata:\n      name: compute-resources\n    spec:\n      hard:\n        limits.cpu: $${resources['config#quota'].outputs.limits-cpu}\n        limits.memory: $${resources['config#quota'].outputs.limits-memory}"
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "base-env_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.base-env.id
}
base-env-resourcequota.yaml (view on GitHub):
# This Resource Definition uses the base-env Resource type to create
# a ResourceQuota manifest in the target namespace.
# The actual values are read from a referenced config resource.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: base-env
entity:
  name: base-env
  type: base-env
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests: |-
          quota.yaml:
            location: namespace
            data:
              apiVersion: v1
              kind: ResourceQuota
              metadata:
                name: compute-resources
              spec:
                hard:
                  limits.cpu: ${resources['config#quota'].outputs.limits-cpu}
                  limits.memory: ${resources['config#quota'].outputs.limits-memory}
  criteria:
  - {}
config-quota-override.tf (view on GitHub):
resource "humanitec_resource_definition" "quota-config-override" {
  driver_type = "humanitec/echo"
  id          = "quota-config-override"
  name        = "quota-config-override"
  type        = "config"
  driver_inputs = {
    values_string = jsonencode({
      "limits-cpu"    = "750m"
      "limits-memory" = "750Mi"
    })
  }
}
resource "humanitec_resource_definition_criteria" "quota-config-override_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.quota-config-override.id
  res_id                 = "quota"
  app_id                 = "my-app"
}
config-quota-override.yaml (view on GitHub):
# This Resource Definition uses the Echo Driver to provide configuration values
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: quota-config-override
entity:
  name: quota-config-override
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    # Any Driver inputs will be returned as outputs by the Echo Driver
    values:
      limits-cpu: "750m"
      limits-memory: "750Mi"
  # The matching criteria make this Resource Definition match for a particular app_id only
  criteria:
  - res_id: quota
    app_id: my-app
config-quota.tf (view on GitHub):
resource "humanitec_resource_definition" "quota-config" {
  driver_type = "humanitec/echo"
  id          = "quota-config"
  name        = "quota-config"
  type        = "config"
  driver_inputs = {
    values_string = jsonencode({
      "limits-cpu"    = "500m"
      "limits-memory" = "500Mi"
    })
  }
}
resource "humanitec_resource_definition_criteria" "quota-config_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.quota-config.id
  res_id                 = "quota"
}
config-quota.yaml (view on GitHub):
# This Resource Definition uses the Echo Driver to provide configuration values
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: quota-config
entity:
  name: quota-config
  type: config
  driver_type: humanitec/echo
  driver_inputs:
    # Any Driver inputs will be returned as outputs by the Echo Driver
    values:
      limits-cpu: "500m"
      limits-memory: "500Mi"
  criteria:
  - res_id: quota
Security context
This section contains example Resource Definitions using the Template Driver for adding a security context to Kubernetes Deployments.
- custom-workload-with-security-context.yaml: Add a security context to your Workload. This format is for use with the Humanitec CLI.
- custom-workload-with-security-context.tf: Add a security context to your Workload. This format is for use with the Humanitec Terraform provider.
custom-workload-with-security-context.tf (view on GitHub):
resource "humanitec_resource_definition" "custom-workload" {
  driver_type = "humanitec/template"
  id          = "custom-workload"
  name        = "custom-workload"
  type        = "workload"
  driver_inputs = {
    values_string = jsonencode({
      "templates" = {
        "outputs" = <<END_OF_TEXT
update:
- op: add
  path: /spec/securityContext
  value:
    fsGroup: 1000
    runAsGroup: 1000
    runAsNonRoot: true
    runAsUser: 1000
    seccompProfile:
      type: RuntimeDefault
{{- range $containerId, $value := .resource.spec.containers }}
- op: add
  path: /spec/containers/{{ $containerId }}/securityContext
  value:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
      - ALL
    privileged: false
    readOnlyRootFilesystem: true
{{- end }}
END_OF_TEXT
      }
    })
  }
}
resource "humanitec_resource_definition_criteria" "custom-workload_criteria_0" {
  resource_definition_id = resource.humanitec_resource_definition.custom-workload.id
}
custom-workload-with-security-context.yaml (view on GitHub):
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: custom-workload
entity:
  name: custom-workload
  type: workload
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        outputs: |
          update:
          - op: add
            path: /spec/securityContext
            value:
              fsGroup: 1000
              runAsGroup: 1000
              runAsNonRoot: true
              runAsUser: 1000
              seccompProfile:
                type: RuntimeDefault
          {{- range $containerId, $value := .resource.spec.containers }}
          - op: add
            path: /spec/containers/{{ $containerId }}/securityContext
            value:
              allowPrivilegeEscalation: false
              capabilities:
                drop:
                - ALL
              privileged: false
              readOnlyRootFilesystem: true
          {{- end }}
  criteria:
  - {}
Serviceaccount
This section contains example Resource Definitions using the Template Driver for provisioning Kubernetes ServiceAccounts for your Workloads.
The solution consists of a combination of two Resource Definitions of type workload and k8s-service-account.
The workload Resource Type is an implicit Type which is automatically referenced for any Deployment. This workload Resource Definition adds the serviceAccountName item to the Pod spec and references a k8s-service-account type Resource, causing it to be provisioned. The k8s-service-account Resource Definition generates the Kubernetes manifest for the actual ServiceAccount.
A Resource Graph for a Workload using those Resource Definitions will look like this:
flowchart LR
  workloadVirtual[Workload "my-workload"] --> workload(id: modules.my-workload\ntype: workload\nclass: default)
  workload --> serviceAccount(id: modules.my-workload\ntype: k8s-service-account\nclass: default)
Note that the Resource ID is used in the k8s-service-account Resource Definition to derive the name of the actual Kubernetes ServiceAccount. Check the code for details.
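As a sketch of that derivation for a hypothetical Workload my-workload: ${context.res.id} resolves to modules.my-workload, and the init template splits it on "." and takes element 1, so the ServiceAccount manifest is created with metadata.name: my-workload:

res_id: modules.my-workload
name: my-workload   # index 1 after splitList "."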
serviceaccount-k8ssa-def.tf (view on GitHub):
resource "humanitec_resource_definition" "serviceaccount-k8s-service-account" {
  driver_type = "humanitec/template"
  id          = "serviceaccount-k8s-service-account"
  name        = "serviceaccount-k8s-service-account"
  type        = "k8s-service-account"
  driver_inputs = {
    values_string = jsonencode({
      "res_id" = "$${context.res.id}"
      "templates" = {
        "init"    = "name: {{ index ( .driver.values.res_id | splitList \".\" ) 1 }}\n"
        "outputs" = "name: {{ .init.name }}\n"
        "manifests" = <<END_OF_TEXT
service-account.yaml:
  location: namespace
  data:
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: {{ .init.name }}
END_OF_TEXT
      }
    })
  }
}
serviceaccount-k8ssa-def.yaml
(
view on GitHub
)
:
# This Resource Definition provisions a Kubernetes ServiceAccount
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: serviceaccount-k8s-service-account
entity:
driver_type: humanitec/template
name: serviceaccount-k8s-service-account
type: k8s-service-account
driver_inputs:
values:
res_id: ${context.res.id}
templates:
init: |
name: {{ index ( .driver.values.res_id | splitList "." ) 1 }}
outputs: |
name: {{ .init.name }}
manifests: |
service-account.yaml:
location: namespace
data:
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .init.name }}
serviceaccount-workload-def.tf
(
view on GitHub
)
:
resource "humanitec_resource_definition" "serviceaccount-workload" {
driver_type = "humanitec/template"
id = "serviceaccount-workload"
name = "serviceaccount-workload"
type = "workload"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"outputs" = <<END_OF_TEXT
update:
- op: add
path: /spec/serviceAccountName
value: $${resources.k8s-service-account.outputs.name}
END_OF_TEXT
}
})
}
}
serviceaccount-workload-def.yaml
(
view on GitHub
)
:
# This Resource Definition adds a Kubernetes ServiceAccount to a Workload
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: serviceaccount-workload
entity:
driver_type: humanitec/template
name: serviceaccount-workload
type: workload
driver_inputs:
values:
templates:
outputs: |
update:
- op: add
path: /spec/serviceAccountName
value: ${resources.k8s-service-account.outputs.name}
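At deployment time, the ${resources.k8s-service-account.outputs.name} placeholder resolves to the name output of the co-provisioned k8s-service-account Resource. For the my-workload example above, the patched Pod spec would contain (a sketch):
spec:
  serviceAccountName: my-workload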
TLS cert
This section contains example Resource Definitions using the Template Driver for managing TLS Certificates in your cluster.
certificate-crd.yaml: Add a Certificate custom resource in the namespace of your deployment. This format is for use with the Humanitec CLI.
certificate-crd.tf
(
view on GitHub
)
:
resource "humanitec_resource_definition" "certificate-crd" {
driver_type = "humanitec/template"
id = "certificate-crd"
name = "certificate-crd"
type = "tls-cert"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"init" = <<END_OF_TEXT
tlsSecretName: {{ .id }}-tls
hostName: $${resources.dns.outputs.host}
certificateName: {{ .id }}-cert
END_OF_TEXT
"manifests" = <<END_OF_TEXT
certificate-crd.yml:
data:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ .init.certificateName }}
spec:
secretName: {{ .init.tlsSecretName }}
duration: 2160h # 90d
renewBefore: 720h # 30d
isCA: false
privateKey:
algorithm: RSA
encoding: PKCS1
size: 2048
usages:
- server auth
- client auth
dnsNames:
- {{ .init.hostName | toString | toRawJson }}
# The name of the issuerRef must point to the issuer / clusterIssuer in your cluster
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
location: namespace
END_OF_TEXT
"outputs" = "tls_secret_name: {{ .init.tlsSecretName }}\n"
}
})
}
}
resource "humanitec_resource_definition_criteria" "certificate-crd_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.certificate-crd.id
class = "default"
}
certificate-crd.yaml
(
view on GitHub
)
:
# This Resource Definition creates a Certificate custom resource,
# which will instruct cert-manager to create a TLS certificate
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: certificate-crd
entity:
driver_type: humanitec/template
name: certificate-crd
type: tls-cert
criteria:
- class: default
driver_inputs:
values:
templates:
init: |
tlsSecretName: {{ .id }}-tls
hostName: ${resources.dns.outputs.host}
certificateName: {{ .id }}-cert
manifests: |
certificate-crd.yml:
data:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ .init.certificateName }}
spec:
secretName: {{ .init.tlsSecretName }}
duration: 2160h # 90d
renewBefore: 720h # 30d
isCA: false
privateKey:
algorithm: RSA
encoding: PKCS1
size: 2048
usages:
- server auth
- client auth
dnsNames:
- {{ .init.hostName | toString | toRawJson }}
# The name of the issuerRef must point to the issuer / clusterIssuer in your cluster
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
location: namespace
outputs: |
tls_secret_name: {{ .init.tlsSecretName }}
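The tls_secret_name output makes the name of the Secret that cert-manager will populate available to other Resources in the Graph. As a sketch (the consuming Resource Definition is hypothetical, and the exact reference depends on your setup), an ingress manifest template in another Resource Definition could consume it like this:
tls:
  - hosts:
      - ${resources.dns.outputs.host}
    secretName: ${resources.tls-cert.outputs.tls_secret_name}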
Tolerations
This section contains example Resource Definitions using the Template Driver for managing tolerations on your Pods.
tolerations.yaml: Add tolerations to the Workload. This format is for use with the Humanitec CLI.
tolerations.tf
(
view on GitHub
)
:
resource "humanitec_resource_definition" "workload-toleration" {
driver_type = "humanitec/template"
id = "workload-toleration"
name = "workload-toleration"
type = "workload"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"outputs" = <<END_OF_TEXT
update:
- op: add
path: /spec/tolerations
value:
- key: "example-key"
operator: "Exists"
effect: "NoSchedule"
END_OF_TEXT
}
})
}
}
tolerations.yaml
(
view on GitHub
)
:
# Add tolerations to the Workload by adding a value to the manifest at .spec.tolerations
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: workload-toleration
entity:
name: workload-toleration
type: workload
driver_type: humanitec/template
driver_inputs:
values:
templates:
outputs: |
update:
- op: add
path: /spec/tolerations
value:
- key: "example-key"
operator: "Exists"
effect: "NoSchedule"
criteria: []
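Tolerations only take effect in combination with matching taints on cluster nodes. As an illustration (the node name is hypothetical), the toleration above lets the Pod schedule onto a node tainted like this:

kubectl taint nodes node-1 example-key:NoSchedule

Because the operator is Exists, the toleration matches any taint with key example-key and effect NoSchedule, regardless of the taint's value.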
Volumes
This section contains Resource Definition examples for handling Kubernetes Volumes by using the template Driver to configure your own PersistentVolume implementation. You can see this other example if you want to use the volume-pvc Driver.
You will find two examples:
volume-emptydir - injects an emptyDir volume into a Workload for any request of a volume resource with the class ephemeral.
volume-nfs - creates the associated PersistentVolumeClaim, PersistentVolume, and volume in a Workload for any request of a volume resource with the class nfs.
You can find a Score file example using the volume resource type here.
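As a minimal sketch of such a request (the Workload name, image, and mount path are hypothetical), a Score file asking for an ephemeral volume could look like this:
apiVersion: score.dev/v1b1
metadata:
  name: my-workload
containers:
  demo:
    image: nginx:alpine
    volumes:
      - source: ${resources.data}
        target: /tmp/data
resources:
  data:
    type: volume
    class: ephemeral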
volume-emptydir.tf
(
view on GitHub
)
:
resource "humanitec_resource_definition" "volume-emptydir" {
driver_type = "humanitec/template"
id = "volume-emptydir"
name = "volume-emptydir"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"manifests" = {
"emptydir.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: $${context.res.guresid}-emptydir
emptyDir:
sizeLimit: 1024Mi
END_OF_TEXT
}
}
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-emptydir_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-emptydir.id
class = "ephemeral"
}
volume-emptydir.yaml
(
view on GitHub
)
:
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: volume-emptydir
entity:
name: volume-emptydir
type: volume
driver_type: humanitec/template
driver_inputs:
values:
templates:
manifests:
emptydir.yaml:
location: volumes
data: |
name: ${context.res.guresid}-emptydir
emptyDir:
sizeLimit: 1024Mi
criteria:
- class: ephemeral
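Because the manifest's location is volumes, its data is injected as an entry into the Pod's .spec.volumes array rather than applied as a standalone object in the namespace. The rendered entry would look roughly like this (the GUResID prefix is generated at deployment time; the value shown is hypothetical):
volumes:
  - name: a1b2c3d4e5f6-emptydir
    emptyDir:
      sizeLimit: 1024Mi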
volume-nfs.tf
(
view on GitHub
)
:
resource "humanitec_resource_definition" "volume-nfs" {
driver_type = "humanitec/template"
id = "volume-nfs"
name = "volume-nfs"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"init" = <<END_OF_TEXT
# Generate a unique id for each pv/pvc combination.
# Every Workload will have a separate pv and pvc created for it,
# but pointing to the same NFS server endpoint.
volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
pvBaseName: pv-tmpl-
pvcBaseName: pvc-tmpl-
volBaseName: vol-tmpl-
END_OF_TEXT
"manifests" = {
"app-pv-tmpl.yaml" = {
"location" = "namespace"
"data" = <<END_OF_TEXT
apiVersion: v1
kind: PersistentVolume
metadata:
name: {{ .init.pvBaseName }}{{ .init.volumeUid }}
spec:
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
nfs:
server: nfs-server.default.svc.cluster.local
path: "/"
mountOptions:
- nfsvers=4.2
END_OF_TEXT
}
"app-pvc-tmpl.yaml" = {
"location" = "namespace"
"data" = <<END_OF_TEXT
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
spec:
accessModes:
- ReadWriteMany
storageClassName: ""
resources:
requests:
storage: 1Mi
volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
"app-vol-tmpl.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: {{ .init.volBaseName }}{{ .init.volumeUid }}
persistentVolumeClaim:
claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
}
"outputs" = <<END_OF_TEXT
volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-nfs_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-nfs.id
class = "nfs"
}
volume-nfs.yaml
(
view on GitHub
)
:
# Using the Template Driver for the static provisioning of
# a Kubernetes PersistentVolume and PersistentVolumeClaim combination,
# then adding the volume into the Pod of the Workload.
# The volumeMount in the container is defined in the "workload" type Resource Definition.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
id: volume-nfs
entity:
name: volume-nfs
type: volume
driver_type: humanitec/template
driver_inputs:
values:
templates:
init: |
# Generate a unique id for each pv/pvc combination.
# Every Workload will have a separate pv and pvc created for it,
# but pointing to the same NFS server endpoint.
volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
pvBaseName: pv-tmpl-
pvcBaseName: pvc-tmpl-
volBaseName: vol-tmpl-
manifests:
####################################################################
# This template creates the PersistentVolume in the target namespace
# Modify the nfs server and path to address your NFS server
####################################################################
app-pv-tmpl.yaml:
location: namespace
data: |
apiVersion: v1
kind: PersistentVolume
metadata:
name: {{ .init.pvBaseName }}{{ .init.volumeUid }}
spec:
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
nfs:
server: nfs-server.default.svc.cluster.local
path: "/"
mountOptions:
- nfsvers=4.2
#########################################################################
# This template creates the PersistentVolumeClaim in the target namespace
#########################################################################
app-pvc-tmpl.yaml:
location: namespace
data: |
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
spec:
accessModes:
- ReadWriteMany
storageClassName: ""
resources:
requests:
storage: 1Mi
volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
########################################################
# This template creates the volume in the Workload's Pod
########################################################
app-vol-tmpl.yaml:
location: volumes
data: |
name: {{ .init.volBaseName }}{{ .init.volumeUid }}
persistentVolumeClaim:
claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
# Make the volume name and pvc name available for other Resources
outputs: |
volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
criteria:
- class: nfs
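As noted in the comments above, the matching volumeMount in the container is defined in a "workload" type Resource Definition, which can consume the volumeName output. A sketch of the relevant outputs template in such a definition (the container id, mount path, and exact resource reference are assumptions that depend on how the volume is requested):
update:
  - op: add
    path: /spec/containers/main/volumeMounts
    value:
      - name: ${resources.volume.outputs.volumeName}
        mountPath: /data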