terraform-provider-kubernetes: Cannot use azure_disk as a k8s persistent volume disk in an AKS cluster
Terraform Version
0.11.10
Affected Resource(s)
- kubernetes_pod
- kubernetes_persistent_volume
Terraform Configuration Files
main.tf
provider "kubernetes" {
alias = "kubernetes"
}
provider "azurerm" {
alias = "azurerm"
}
terraform {
backend "s3" {
bucket = "HIDDEN"
key = "HIDDEN"
region = "HIDDEN"
}
}
module "storages" {
source = "./storages"
providers = {
azurerm = "azurerm.azurerm"
}
services = "${var.services}"
subscription_id = "${var.subscription_id}"
}
resource "kubernetes_namespace" "namespace" {
metadata {
name = "${terraform.workspace}"
}
}
resource "kubernetes_secret" "secret" {
metadata {
name = "docker-cfg"
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
data {
".dockerconfigjson" = "${file("~/.docker/config.json")}"
}
type = "kubernetes.io/dockerconfigjson"
}
resource "kubernetes_storage_class" "storage_class" {
metadata {
name = "storage-class"
}
storage_provisioner = "kubernetes.io/azure-disk"
parameters {
type = "Standard_LRS"
}
}
resource "kubernetes_persistent_volume" "volume" {
count = "${length(var.services)}"
metadata {
name = "${lookup(var.services[count.index], "name")}-volume"
}
spec {
capacity {
storage = "10Gi"
}
access_modes = ["ReadWriteMany"]
storage_class_name = "${kubernetes_storage_class.storage_class.metadata.0.name}"
persistent_volume_source {
azure_disk {
caching_mode = "None"
data_disk_uri = "/subscriptions/${var.subscription_id}/resourceGroups/mf/providers/Microsoft.Compute/disks/${module.storages.disk_name[count.index]}"
disk_name = "${module.storages.disk_name[count.index]}"
}
}
}
}
resource "kubernetes_persistent_volume_claim" "volume_claim" {
count = "${length(var.services)}"
metadata {
name = "${lookup(var.services[count.index], "name")}-volume-claim"
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
spec {
access_modes = ["ReadWriteMany"]
storage_class_name = "${kubernetes_storage_class.storage_class.metadata.0.name}"
resources {
requests {
storage = "5Gi"
}
}
volume_name = "${kubernetes_persistent_volume.volume.*.metadata.0.name[count.index]}"
}
}
resource "kubernetes_pod" "pods" {
count = "${length(var.services)}"
metadata {
name = "${lookup(var.services[count.index], "name")}"
labels {
name = "${lookup(var.services[count.index], "name")}"
}
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
spec {
image_pull_secrets = {
name = "${kubernetes_secret.secret.metadata.0.name}"
}
container {
image = "${lookup(var.services[count.index], "image")}:${lookup(var.services[count.index], "version")}"
name = "${lookup(var.services[count.index], "name")}"
port {
container_port = "${lookup(var.services[count.index], "target_port")}"
}
}
}
}
resource "kubernetes_pod" "db-pods" {
count = "${length(var.services)}"
metadata {
name = "db-${lookup(var.services[count.index], "name")}"
labels {
name = "db-${lookup(var.services[count.index], "name")}"
}
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
spec {
container {
image = "${lookup(var.services[count.index], "db_image")}:${lookup(var.services[count.index], "db_version")}"
name = "${lookup(var.services[count.index], "name")}-db"
port {
container_port = "${lookup(var.services[count.index], "db_port")}"
},
}
volume {
name = "db-${lookup(var.services[count.index], "name")}-volume-claim"
persistent_volume_claim {
claim_name = "${kubernetes_persistent_volume_claim.volume_claim.*.metadata.0.name[count.index]}"
}
}
}
}
resource "kubernetes_service" "services" {
count = "${length(var.services)}"
metadata {
name = "${lookup(var.services[count.index], "name")}"
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
spec {
selector {
name = "${lookup(var.services[count.index], "name")}"
}
session_affinity = "ClientIP"
port {
port = "${lookup(var.services[count.index], "port")}"
target_port = "${lookup(var.services[count.index], "target_port")}"
}
type = "LoadBalancer"
}
}
resource "kubernetes_service" "db-services" {
count = "${length(var.services)}"
metadata {
name = "${lookup(var.services[count.index], "name")}-db"
namespace = "${kubernetes_namespace.namespace.metadata.0.name}"
}
spec {
selector {
name = "${lookup(var.services[count.index], "name")}-db"
}
port {
port = "${lookup(var.services[count.index], "db_port")}"
target_port = "${lookup(var.services[count.index], "db_port")}"
}
}
}
variables.tf
variable "services" {
type = "list"
default = [
{
name = "web",
image = "my remote docker image",
version = "latest",
port = 80,
target_port = 3000,
db_image = "mysql",
db_version = "latest",
db_port = 3306,
},
]
}
storages/main.tf
resource "azurerm_resource_group" "group" {
name = "mf"
location = "Japan East"
}
# resource "azurerm_storage_account" "account" {
# name = "mfaccount"
# resource_group_name = "${azurerm_resource_group.group.name}"
# location = "Japan East"
# account_tier = "Standard"
# account_replication_type = "LRS"
# }
# resource "azurerm_storage_container" "container" {
# name = "mf-container"
# resource_group_name = "${azurerm_resource_group.group.name}"
# storage_account_name = "${azurerm_storage_account.account.name}"
# container_access_type = "private"
# }
# resource "azurerm_storage_blob" "blob" {
# count = "${length(var.services)}"
# name = "${lookup(var.services[count.index], "name")}-${terraform.workspace}-blob.vhd"
# resource_group_name = "${azurerm_resource_group.group.name}"
# storage_account_name = "${azurerm_storage_account.account.name}"
# storage_container_name = "${azurerm_storage_container.container.name}"
# type = "page"
# size = 1250000384
# }
resource "azurerm_managed_disk" "disk" {
count = "${length(var.services)}"
name = "${lookup(var.services[count.index], "name")}-${terraform.workspace}-disk"
location = "Japan East"
resource_group_name = "${azurerm_resource_group.group.name}"
storage_account_type = "Standard_LRS"
create_option = "Empty"
disk_size_gb = "10"
tags {
environment = "${terraform.workspace}"
}
}
output.tf
output "disk_name" {
description = "Disk name"
value = ["${azurerm_managed_disk.disk.*.name}"]
}
variables.tf
variable "services" {
type = "list"
}
Debug Output
terraform apply -auto-approve -var "subscription_id=$ARM_SUBSCRIPTION_ID"
kubernetes_namespace.namespace: Refreshing state... (ID: develop)
kubernetes_storage_class.storage_class: Refreshing state... (ID: storage-class)
kubernetes_secret.secret: Refreshing state... (ID: develop/docker-cfg)
kubernetes_service.db-services: Refreshing state... (ID: develop/web-db)
kubernetes_service.services: Refreshing state... (ID: develop/web)
kubernetes_pod.pods: Refreshing state... (ID: develop/web)
azurerm_resource_group.group: Refreshing state... (ID: /subscriptions/HIDDEN/resourceGroups/mf)
azurerm_storage_account.account: Refreshing state... (ID: /subscriptions/HIDDEN-...soft.Storage/storageAccounts/mfaccount)
kubernetes_persistent_volume.volume: Refreshing state... (ID: web-volume)
kubernetes_persistent_volume_claim.volume_claim: Refreshing state... (ID: develop/web-volume-claim)
kubernetes_pod.db-pods: Refreshing state... (ID: develop/db-web)
azurerm_storage_container.container: Refreshing state... (ID: https://mfaccount.blob.core.windows.net/mf-container)
azurerm_storage_blob.blob: Refreshing state... (ID: https://mfaccount.blob.core.windows.net/mf-container/web-develop-blob)
kubernetes_persistent_volume.volume: Destroying... (ID: web-volume)
kubernetes_secret.secret: Modifying... (ID: develop/docker-cfg)
data..dockerconfigjson: "<sensitive>" => "<sensitive>"
kubernetes_persistent_volume.volume: Destruction complete after 0s
kubernetes_secret.secret: Modifications complete after 0s (ID: develop/docker-cfg)
module.storages.azurerm_storage_blob.blob: Destroying... (ID: https://mfaccount.blob.core.windows.net/mf-container/web-develop-blob)
module.storages.azurerm_managed_disk.disk: Creating...
create_option: "" => "Empty"
disk_size_gb: "" => "10"
location: "" => "japaneast"
name: "" => "web-develop-disk"
resource_group_name: "" => "mf"
source_uri: "" => "<computed>"
storage_account_type: "" => "Standard_LRS"
tags.%: "" => "1"
tags.environment: "" => "develop"
module.storages.azurerm_storage_blob.blob: Destruction complete after 1s
module.storages.azurerm_storage_container.container: Destroying... (ID: https://mfaccount.blob.core.windows.net/mf-container)
module.storages.azurerm_storage_container.container: Destruction complete after 0s
module.storages.azurerm_storage_account.account: Destroying... (ID: /subscriptions/HIDDEN-...soft.Storage/storageAccounts/mfaccount)
module.storages.azurerm_managed_disk.disk: Creation complete after 4s (ID: /subscriptions/HIDDEN-...crosoft.Compute/disks/web-develop-disk)
kubernetes_persistent_volume.volume: Creating...
metadata.#: "" => "1"
metadata.0.generation: "" => "<computed>"
metadata.0.name: "" => "web-volume"
metadata.0.resource_version: "" => "<computed>"
metadata.0.self_link: "" => "<computed>"
metadata.0.uid: "" => "<computed>"
spec.#: "" => "1"
spec.0.access_modes.#: "" => "1"
spec.0.access_modes.1254135962: "" => "ReadWriteMany"
spec.0.capacity.%: "" => "1"
spec.0.capacity.storage: "" => "10Gi"
spec.0.persistent_volume_reclaim_policy: "" => "Retain"
spec.0.persistent_volume_source.#: "" => "1"
spec.0.persistent_volume_source.0.azure_disk.#: "" => "1"
spec.0.persistent_volume_source.0.azure_disk.0.caching_mode: "" => "None"
spec.0.persistent_volume_source.0.azure_disk.0.data_disk_uri: "" => "/subscriptions/HIDDEN/resourceGroups/mf/providers/Microsoft.Compute/disks/web-develop-disk"
spec.0.persistent_volume_source.0.azure_disk.0.disk_name: "" => "web-develop-disk"
spec.0.persistent_volume_source.0.azure_disk.0.read_only: "" => "false"
spec.0.storage_class_name: "" => "storage-class"
module.storages.azurerm_storage_account.account: Destruction complete after 4s
Error: Error applying plan:
1 error(s) occurred:
* kubernetes_persistent_volume.volume: 1 error(s) occurred:
* kubernetes_persistent_volume.volume: PersistentVolume "web-volume" is invalid: spec.azureDisk.diskURI: Unsupported value: "/subscriptions/HIDDEN/resourceGroups/mf/providers/Microsoft.Compute/disks/web-develop-disk": supported values: "https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"
Expected Behavior
I want to use an Azure storage resource as a Persistent Volume for my AKS cluster.
AKS clusters use managed disk by default, but azure_disk in Terraform documentation refers to an Azure Data Disk, resource that is deprecated and not included in azurerm provider as stated here, and is blob based (unmanaged disk).
In the code above, you’ll find two tests I did:
- Using a managed disk (
azurerm_managed_diskazurerm provider resource) - Using a standard blob storage (
azurerm_storage_blobazurerm provider resource)
Sadly none of these works, for these reasons:
- When passing a
managed diskurl to the persistent volume resource, I get the error from the logs above (basically, terraform is expecting ablob-likeurl, with account and container references) - When passing a
blob storageurl, terraform apply works fine, but then when mounting the volume claim to a pod, the pod fails to run, with this error stack:
kubectl describe pod/db-web --namespace=develop
Name: db-web
Namespace: develop
Node: aks-agentpool-89919420-0/10.240.0.4
Start Time: Thu, 01 Nov 2018 14:50:23 +0900
Labels: name=db-web
Annotations: <none>
Status: Pending
IP:
Containers:
web-db:
Container ID:
Image: mysql:latest
Image ID:
Port: 3306/TCP
Host Port: 0/TCP
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment: <none>
Mounts: <none>
Conditions:
Type Status
Initialized True
Ready False
PodScheduled True
Volumes:
db-web-volume-claim:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: web-volume-claim
ReadOnly: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 12s default-scheduler Successfully assigned db-web to aks-agentpool-89919420-0
Warning FailedMount 11s attachdetach-controller AttachVolume.Attach failed for volume "web-volume" : Attach volume "web-develop-blob.vhd" to instance "aks-agentpool-89919420-0" failed with compute.VirtualMachinesClient#CreateOrUpdate: Failure responding to request: StatusCode=409 -- Original Error: autorest/azure: Service returned an error. Status=409 Code="OperationNotAllowed" Message="Addition of a blob based disk to VM with managed disks is not supported."
So eventually I tried to look if I can run an AKS cluster with unmanaged disk. But I couldn’t find anything about this.
Is the Terraform kubernetes provider not up to date with Azurerm one? Why can’t we use azure managed disks as persistent volume while azurerm provider only provides users with managed disks resources for storage?
Actual Behavior
Kubernetes provider should allow using Azure managed disks resources for persistent volume.
Steps to Reproduce
terraform apply
About this issue
- Original URL
- State: closed
- Created 6 years ago
- Reactions: 19
- Comments: 16 (1 by maintainers)
Commits related to this issue
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. This is a cherry-pick of work done by Pierre Chalamet in PR ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. This is a cherry-pick of work done by Pierre Chalamet in PR ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests around Azure di... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests for Azure disk ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests for Azure disk ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests for Azure disk ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add azure managed disk to persistent volume resource * Add Azure Managed Disk to PV resource. * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests for Azure disk ... — committed to dak1n1/terraform-provider-kubernetes by dak1n1 4 years ago
- Add Azure Managed disk to PV resource (#1011) * Add azure managed disk to persistent volume resource * Fix broken tests around volume expansion. * Add Azure Managed Disk example. * Add tests for A... — committed to hashicorp/terraform-provider-kubernetes by dak1n1 4 years ago
Actually, a working patch has been available for over a year now, but the maintainers have yet to merge it. There’s also an updated patch that builds on that one.
I’ve given up waiting so for now I have to just build my own patched copy and get on with that.
Anything anyone can do to push this along or bring it to the attention of someone who can? Seems a fix has been ready for a while now, and would unblock many people’s use cases, including mine.
This issue was opened 2 years ago it’s pretty disappointing that no maintainers/contributors have bothered to fix it even though someone linked the patch months ago. This essentially makes it impossible to use the Azure/Kubernetes providers to provision PVs in AKS. I’ve had to use the
kubectlprovider as a workaround.This issue is causing us problems as well. Would be create to get someone to review #342 to see if we could get this fixed.
I can confirm that this is still an issue. Too bad; this makes using Terraform to managed AKS volumes and managed disks useless at this time.
Closing since the feature has merged. It will go out with the next release (planned for later this week).
I also ran into this problem - would be nice to be able to set the
disk_uriparameter… I am going to do a workaround using the kubectl_manifest provider instead.Work around that worked good for me:
A working example of the kubectl yaml for Persistent Volume creation is described here: https://github.com/Azure/acs-engine/issues/2299
Parameter ‘kind: Managed’ seems to be the important bit, and I’m guessing this is missing in the Terraform module for PV’s related to Azure Managed Disks. Run referenced script without it, and we get the same error as reported above directly from kubectl.