Volumes
This section contains Resource Definition examples for handling Kubernetes Volumes by using the Template
Driver to configure your own volume implementation.
You will find these examples:

`volume-configmap`
: injects a `configMap` volume into a Workload for any request of a `volume` resource with the class `config`

`volume-dynamic-provisioning`
: dynamic provisioning of a PersistentVolume. Creates the associated `PersistentVolumeClaim` object, and injects the `volume` into a Workload for any request of a `volume` resource with the class `standard-rwo`

`volume-emptydir`
: injects an `emptyDir` volume into a Workload for any request of a `volume` resource with the class `ephemeral`

`volume-nfs`
: static provisioning of a PersistentVolume. Creates the associated `PersistentVolumeClaim` and `PersistentVolume` objects, and injects the `volume` into a Workload for any request of a `volume` resource with the class `nfs`

`volume-projected`
: injects a `projected` volume into a Workload for any request of a `volume` resource with the class `projected`
You can find a Score file example using the `volume` resource type here.
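For orientation, the sketch below shows what such a Score file might look like. It is an illustrative assumption, not the linked example: the workload name, container name, image, and the `tmp-vol` resource name are placeholders, and the `class` must match the matching criteria of one of the Resource Definitions on this page.

```yaml
# Hypothetical Score file requesting a volume of class "ephemeral"
apiVersion: score.dev/v1b1
metadata:
  name: demo-workload
containers:
  demo:
    image: nginx:alpine
    volumes:
      # "source" resolves to the volume injected by the Resource Definition
      - source: ${resources.tmp-vol}
        target: /tmp/data
resources:
  tmp-vol:
    type: volume
    class: ephemeral
```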
To see examples for the convenience Drivers, see the `volume-pvc` Driver and `volume-nfs` Driver examples.
Resource Definitions
`volume-configMap.yaml` (view on GitHub):
```yaml
# This Resource Definition uses the Template Driver to create a volume accessing a ConfigMap
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: volume-configmap
entity:
  name: volume-configmap
  type: volume
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests:
          configmap.yaml:
            location: volumes
            data: |
              name: ${context.res.guresid}-configmap
              configMap:
                # The ConfigMap named here needs to exist. The Resource Definition does not create it
                name: log-config
                items:
                  - key: log_level
                    path: log_level.conf
  criteria:
    - class: config
```
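This Definition expects a ConfigMap named `log-config` to already exist in the target namespace. As a minimal sketch of what that ConfigMap could look like (the `log_level` value is an assumption), you would create something like this yourself before deploying:

```yaml
# Illustrative ConfigMap matching the name and key used above.
# The Resource Definition does not create it.
apiVersion: v1
kind: ConfigMap
metadata:
  name: log-config
data:
  log_level: "info"
```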
`volume-dynamic-provisioning.yaml` (view on GitHub):
```yaml
# Using the Template Driver for the dynamic provisioning of
# a Kubernetes PersistentVolume and PersistentVolumeClaim combination,
# then adding the volume into the Pod of the Workload.
# The PVC requests a storageClass "standard-rwo".
# The volumeMount in the container is defined in the "workload" type Resource Definition.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: volume-standard-dynamic
entity:
  name: volume-standard-dynamic
  type: volume
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        init: |
          # Generate a unique id for each pv/pvc combination.
          # Every Workload will have a separate pv and pvc created for it.
          volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
          pvBaseName: pv-tmpl-
          pvcBaseName: pvc-tmpl-
          volBaseName: vol-tmpl-
        manifests:
          #########################################################################
          # This template creates the PersistentVolumeClaim in the target namespace
          #########################################################################
          app-pvc-tmpl.yaml:
            location: namespace
            data: |
              apiVersion: v1
              kind: PersistentVolumeClaim
              metadata:
                name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
              spec:
                accessModes:
                  - ReadWriteOnce
                storageClassName: "standard-rwo"
                resources:
                  requests:
                    storage: 10Gi
                volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
          ########################################################
          # This template creates the volume in the Workload's Pod
          ########################################################
          app-vol-tmpl.yaml:
            location: volumes
            data: |
              name: {{ .init.volBaseName }}{{ .init.volumeUid }}
              persistentVolumeClaim:
                claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
        # Make the volume name and pvc name available for other Resources
        outputs: |
          volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
          pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
  criteria:
    - class: standard-rwo
```
`volume-emptydir.yaml` (view on GitHub):
```yaml
# This Resource Definition uses the Template Driver to inject an emptyDir volume into the Workload
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: volume-emptydir
entity:
  name: volume-emptydir
  type: volume
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests:
          emptydir.yaml:
            location: volumes
            data: |
              name: ${context.res.guresid}-emptydir
              emptyDir:
                sizeLimit: 1024Mi
  criteria:
    - class: ephemeral
```
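Because the manifest uses `location: volumes`, the rendered snippet is added to the `volumes` list of the Workload's Pod. Illustratively, assuming a generated resource id, the resulting entry in the Pod spec would look roughly like this:

```yaml
# Illustrative rendered result; the name prefix is the generated resource id
volumes:
  - name: a1b2c3d4e5f6-emptydir
    emptyDir:
      sizeLimit: 1024Mi
```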
`volume-nfs.yaml` (view on GitHub):
```yaml
# Using the Template Driver for the static provisioning of
# a Kubernetes PersistentVolume and PersistentVolumeClaim combination,
# then adding the volume into the Pod of the Workload.
# The volumeMount in the container is defined in the "workload" type Resource Definition.
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: volume-nfs
entity:
  name: volume-nfs
  type: volume
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        cookie: |
          # Store the volumeUid in a cookie to be reused for subsequent deployments
          volumeUid: {{ .init.volumeUid }}
        init: |
          # Generate a unique id for each pv/pvc combination.
          # Every Workload will have a separate pv and pvc created for it,
          # but pointing to the same NFS server endpoint.
          {{- if and .cookie .cookie.volumeUid }}
          volumeUid: {{ .cookie.volumeUid }}
          {{- else }}
          volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
          {{- end }}
          pvBaseName: pv-tmpl-
          pvcBaseName: pvc-tmpl-
          volBaseName: vol-tmpl-
        manifests:
          ####################################################################
          # This template creates the PersistentVolume in the target namespace
          # Modify the nfs server and path to address your NFS server
          ####################################################################
          app-pv-tmpl.yaml:
            location: namespace
            data: |
              apiVersion: v1
              kind: PersistentVolume
              metadata:
                name: {{ .init.pvBaseName }}{{ .init.volumeUid }}
              spec:
                capacity:
                  storage: 1Mi
                accessModes:
                  - ReadWriteMany
                nfs:
                  server: nfs-server.default.svc.cluster.local
                  path: "/"
                mountOptions:
                  - nfsvers=4.2
          #########################################################################
          # This template creates the PersistentVolumeClaim in the target namespace
          #########################################################################
          app-pvc-tmpl.yaml:
            location: namespace
            data: |
              apiVersion: v1
              kind: PersistentVolumeClaim
              metadata:
                name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
              spec:
                accessModes:
                  - ReadWriteMany
                storageClassName: ""
                resources:
                  requests:
                    storage: 1Mi
                volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
          ########################################################
          # This template creates the volume in the Workload's Pod
          ########################################################
          app-vol-tmpl.yaml:
            location: volumes
            data: |
              name: {{ .init.volBaseName }}{{ .init.volumeUid }}
              persistentVolumeClaim:
                claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
        # Make the volume name and pvc name available for other Resources
        outputs: |
          volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
          pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
  criteria:
    - class: nfs
```
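The PV template assumes an NFS server reachable inside the cluster at `nfs-server.default.svc.cluster.local`, exporting the path `/`. To point at your own server, adjust the `nfs` block in the PV template; the values below are placeholders:

```yaml
# Placeholder values; replace with your NFS server's address and export path
nfs:
  server: my-nfs.example.com
  path: "/exports/app-data"
```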
`volume-projected.yaml` (view on GitHub):
```yaml
# This Resource Definition uses the Template Driver to create a projected volume
# accessing a ConfigMap and the downwardAPI
apiVersion: entity.humanitec.io/v1b1
kind: Definition
metadata:
  id: volume-projected
entity:
  name: volume-projected
  type: volume
  driver_type: humanitec/template
  driver_inputs:
    values:
      templates:
        manifests:
          projected.yaml:
            location: volumes
            data: |
              name: ${context.res.guresid}-projected
              projected:
                sources:
                  - downwardAPI:
                      items:
                        - path: "labels"
                          fieldRef:
                            fieldPath: metadata.labels
                  - configMap:
                      # The ConfigMap named here needs to exist. The Resource Definition does not create it
                      name: log-config
                      items:
                        - key: log_level
                          path: log_level.conf
  criteria:
    - class: projected
```
`volume-configMap.tf` (view on GitHub):
resource "humanitec_resource_definition" "volume-configmap" {
driver_type = "humanitec/template"
id = "volume-configmap"
name = "volume-configmap"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"manifests" = {
"configmap.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: $${context.res.guresid}-configmap
configMap:
# The ConfigMap named here needs to exist. The Resource Definition does not create it
name: log-config
items:
- key: log_level
path: log_level.conf
END_OF_TEXT
}
}
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-configmap_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-configmap.id
class = "config"
}
`volume-dynamic-provisioning.tf` (view on GitHub):
resource "humanitec_resource_definition" "volume-standard-dynamic" {
driver_type = "humanitec/template"
id = "volume-standard-dynamic"
name = "volume-standard-dynamic"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"init" = <<END_OF_TEXT
# Generate a unique id for each pv/pvc combination.
# Every Workload will have a separate pv and pvc created for it,
# but pointing to the same NFS server endpoint.
volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
pvBaseName: pv-tmpl-
pvcBaseName: pvc-tmpl-
volBaseName: vol-tmpl-
END_OF_TEXT
"manifests" = {
"app-pvc-tmpl.yaml" = {
"location" = "namespace"
"data" = <<END_OF_TEXT
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
spec:
accessModes:
- ReadWriteOnce
storageClassName: "standard-rwo"
resources:
requests:
storage: 10Gi
volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
"app-vol-tmpl.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: {{ .init.volBaseName }}{{ .init.volumeUid }}
persistentVolumeClaim:
claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
}
"outputs" = <<END_OF_TEXT
volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-standard-dynamic_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-standard-dynamic.id
class = "standard-rwo"
}
`volume-emptydir.tf` (view on GitHub):
resource "humanitec_resource_definition" "volume-emptydir" {
driver_type = "humanitec/template"
id = "volume-emptydir"
name = "volume-emptydir"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"manifests" = {
"emptydir.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: $${context.res.guresid}-emptydir
emptyDir:
sizeLimit: 1024Mi
END_OF_TEXT
}
}
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-emptydir_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-emptydir.id
class = "ephemeral"
}
`volume-nfs.tf` (view on GitHub):
resource "humanitec_resource_definition" "volume-nfs" {
driver_type = "humanitec/template"
id = "volume-nfs"
name = "volume-nfs"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"cookie" = <<END_OF_TEXT
# Store the volumeUid in a cookie to be reused for subsequent deployments
volumeUid: {{ .init.volumeUid }}
END_OF_TEXT
"init" = <<END_OF_TEXT
# Generate a unique id for each pv/pvc combination.
# Every Workload will have a separate pv and pvc created for it,
# but pointing to the same NFS server endpoint.
{{- if and .cookie .cookie.volumeUid }}
volumeUid: {{ .cookie.volumeUid }}
{{- else }}
volumeUid: {{ randNumeric 4 }}-{{ randNumeric 4 }}
{{- end }}
pvBaseName: pv-tmpl-
pvcBaseName: pvc-tmpl-
volBaseName: vol-tmpl-
END_OF_TEXT
"manifests" = {
"app-pv-tmpl.yaml" = {
"location" = "namespace"
"data" = <<END_OF_TEXT
apiVersion: v1
kind: PersistentVolume
metadata:
name: {{ .init.pvBaseName }}{{ .init.volumeUid }}
spec:
capacity:
storage: 1Mi
accessModes:
- ReadWriteMany
nfs:
server: nfs-server.default.svc.cluster.local
path: "/"
mountOptions:
- nfsvers=4.2
END_OF_TEXT
}
"app-pvc-tmpl.yaml" = {
"location" = "namespace"
"data" = <<END_OF_TEXT
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
spec:
accessModes:
- ReadWriteMany
storageClassName: ""
resources:
requests:
storage: 1Mi
volumeName: {{ .init.pvBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
"app-vol-tmpl.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: {{ .init.volBaseName }}{{ .init.volumeUid }}
persistentVolumeClaim:
claimName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
}
"outputs" = <<END_OF_TEXT
volumeName: {{ .init.volBaseName }}{{ .init.volumeUid }}
pvcName: {{ .init.pvcBaseName }}{{ .init.volumeUid }}
END_OF_TEXT
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-nfs_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-nfs.id
class = "nfs"
}
`volume-projected.tf` (view on GitHub):
resource "humanitec_resource_definition" "volume-projected" {
driver_type = "humanitec/template"
id = "volume-projected"
name = "volume-projected"
type = "volume"
driver_inputs = {
values_string = jsonencode({
"templates" = {
"manifests" = {
"projected.yaml" = {
"location" = "volumes"
"data" = <<END_OF_TEXT
name: $${context.res.guresid}-projected
projected:
sources:
- downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- configMap:
# The ConfigMap named here needs to exist. The Resource Definition does not create it
name: log-config
items:
- key: log_level
path: log_level.conf
END_OF_TEXT
}
}
}
})
}
}
resource "humanitec_resource_definition_criteria" "volume-projected_criteria_0" {
resource_definition_id = resource.humanitec_resource_definition.volume-projected.id
class = "projected"
}
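The Terraform examples above assume a configured Humanitec provider. A minimal sketch of that configuration, assuming the variables `humanitec_org_id` and `humanitec_token` are defined elsewhere:

```terraform
terraform {
  required_providers {
    humanitec = {
      source = "humanitec/humanitec"
    }
  }
}

# Credentials are assumptions; supply your own organization id and API token
provider "humanitec" {
  org_id = var.humanitec_org_id
  token  = var.humanitec_token
}
```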