terraform-provider-vsphere: Failed to query disk type

Terraform Version

0.12.28

vSphere Provider Version

1.22.0+

vSphere Version

6.5

Affected Resource(s)

vsphere_virtual_disk

Terraform Configuration Files

locals {
  host_basename = coalesce(
    var.hostname_override,
    "${var.environment}-${var.application}-${var.component}",
  )
  role = "${var.application}-${var.component}"
}

data "vsphere_datacenter" "datacenter" {
  name = var.vsphere_datacenter
}

data "vsphere_compute_cluster" "cluster" {
  name          = var.vsphere_cluster
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_datastore" "datastore" {
  name          = var.vsphere_datastore
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_resource_pool" "pool" {
  name          = "${var.vsphere_cluster}/Resources"
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_network" "network" {
  name          = var.vsphere_network_label
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_virtual_machine" "template" {
  name          = var.vsphere_template
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# DNS for instances
module "instance-dns" {
  source = "../instance_dns"

  instance_count    = var.instance_count
  service_provider  = "op"
  ttl               = var.vsphere_network_ipv4_addresses != "" ? 900 : 60
  region            = lower(var.region)
  create_regionless = false
  hostname          = local.host_basename
  ips               = vsphere_virtual_machine.instance.*.default_ip_address
  allow_overwrite   = var.allow_dns_overwrite
}

# Instance Resource
resource "vsphere_virtual_machine" "instance" {
  depends_on = [vsphere_virtual_disk.data_disk]

  count            = var.instance_count
  name             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}"
  resource_pool_id = data.vsphere_resource_pool.pool.id
  datastore_id     = data.vsphere_datastore.datastore.id
  guest_id         = data.vsphere_virtual_machine.template.guest_id
  scsi_type        = data.vsphere_virtual_machine.template.scsi_type

  folder = var.vsphere_folder_path

  num_cpus           = var.vsphere_vcpu
  memory             = var.vsphere_memory
  memory_reservation = var.vsphere_reserved_memory

  enable_disk_uuid           = true
  wait_for_guest_net_timeout = var.vsphere_network_ipv4_addresses != "" ? 5 : 180
  sync_time_with_host        = var.sync_time_with_host

  # Prevent attributes from going null in 0.12
  custom_attributes = {}
  extra_config      = {}
  tags              = []

  network_interface {
    network_id     = data.vsphere_network.network.id
    adapter_type   = data.vsphere_virtual_machine.template.network_interface_types[0]
    mac_address    = var.static_macs != "" ? element(split(",", var.static_macs), count.index) : ""
    use_static_mac = var.static_macs == "" ? false : true
  }

  // This doesn't actually work and is a workaround for the customize spec.
  cdrom {
    datastore_id = data.vsphere_datastore.datastore.id
    path         = "ISOs/os-livecd.iso"
  }

  disk {
    path             = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.vmdk"
    label            = "disk0"
    size             = var.root_disk_size
    eagerly_scrub    = data.vsphere_virtual_machine.template.disks[0].eagerly_scrub
    thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
  }

  dynamic "disk" {
    for_each = flatten(length(vsphere_virtual_disk.data_disk) > 0 ? [element(vsphere_virtual_disk.data_disk, count.index)] : [])

    content {
      path         = disk.value.vmdk_path
      label        = "disk1"
      attach       = true
      unit_number  = 1
      datastore_id = data.vsphere_datastore.datastore.id
    }
  }

  clone {
    template_uuid = data.vsphere_virtual_machine.template.id

    customize {
      dns_suffix_list = concat(["${lower(var.region)}.${lower(var.service_provider)}.example.com"], split(",", var.vsphere_network_domain_search))
      dns_server_list = split(
        ",",
        var.vsphere_network_ipv4_addresses != "" ? var.vsphere_network_domain_resolvers : "",
      )

      linux_options {
        host_name = "${lower(local.host_basename)}${count.index + 1}"
        domain    = "${lower(var.region)}.${lower(var.service_provider)}.example.com"
        time_zone = var.vsphere_cluster_timezone
      }

      network_interface {
        ipv4_address = element(split(",", var.vsphere_network_ipv4_addresses), count.index)
        ipv4_netmask = var.vsphere_network_ipv4_prefix_length
      }

      ipv4_gateway = var.vsphere_network_ipv4_gateway
    }
  }

  lifecycle {
    ignore_changes = [
      disk,
      clone,
      poweron_timeout,
      ide_controller_count,
      sata_controller_count,
    ]
  }

  provisioner "local-exec" {
    when = destroy

    interpreter = ["bash", "-c"]

    command = <<EOT
...
EOT

  }
}

resource "vsphere_virtual_disk" "data_disk" {
  count      = var.data_disk_size > 0 ? var.instance_count : 0
  size       = var.data_disk_size
  vmdk_path  = "${lower(local.host_basename)}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
  datacenter = var.vsphere_datacenter
  datastore  = var.vsphere_datastore
  type       = "thin"

  lifecycle {
    prevent_destroy = false
  }
}

# Create Virtual Machine Anti-Affinity Rules
resource "vsphere_compute_cluster_vm_anti_affinity_rule" "cluster_vm_anti_affinity_rule" {
  count               = var.instance_count > 0 ? 1 : 0
  name                = "${lower(local.host_basename)}.${lower(var.region)}.${lower(var.service_provider)}"
  compute_cluster_id  = data.vsphere_compute_cluster.cluster.id
  virtual_machine_ids = vsphere_virtual_machine.instance.*.id
}

# Fun hack explained here https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652
output "instance_ids" {
  value = vsphere_virtual_machine.instance.*.uuid
}

output "instances_dns" {
  value = formatlist(
    "%s.%s",
    concat(vsphere_virtual_machine.instance.*.name, [""]),
    "int.example.com",
  )
}

output "instance_private_ips" {
  value      = vsphere_virtual_machine.instance.*.default_ip_address
  depends_on = [vsphere_virtual_machine.instance]
}

Debug Output

Panic Output

n/a

Expected Behavior

The apply completes successfully, as it does with provider version 1.21.0.

Actual Behavior

The apply fails with a "failed to query disk type" error.

Steps to Reproduce

terraform apply

References

Bug was introduced with https://github.com/hashicorp/terraform-provider-vsphere/commit/4fd6f8e89ec3cf9320554503689a60e464fb8bd4

Community Note

  • Please vote on this issue by adding a 👍 reaction to the original issue to help the community and maintainers prioritize this request
  • Please do not leave “+1” or other comments that do not add relevant new information or questions; they generate extra noise for issue followers and do not help prioritize the request
  • If you are interested in working on this issue or have submitted a pull request, please leave a comment

About this issue

  • Original URL
  • State: open
  • Created 4 years ago
  • Reactions: 5
  • Comments: 23 (3 by maintainers)

Most upvoted comments

@tenthirtyam I tried to cut this down to a simple single file, as we use multiple modules.

It's an issue on vSphere 6.5, 6.7, and 7.0.

The only solution for us is to pin the provider version to 1.21.0.

terraform {
  required_version = ">= 0.13"
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      #version = "1.21.0" # last working version
    }
  }
}

provider "vsphere" {
  user           = var.vsphere_user
  password       = var.vsphere_password
  vsphere_server = var.vsphere_server
}

variable "environment" {
  default = "test"
}
variable "swarm_count" {
  default = "1"
}
variable "disk_size" {
  default = "1"
}
variable "additional_disks" {
  default = "3"
}
variable "additional_disks_size" {
  default = "1"
}
variable "vsphere_server" {
  default = ""
}
variable "vsphere_datacenter" {
  default = ""
}
variable "vsphere_datastore" {
  default = ""
}
variable "vsphere_resource_pool" {
  default = ""
}
variable "vsphere_network" {
  default = ""
}
variable "vsphere_cluster" {
  default = ""
}
variable "vsphere_template" {
  default = ""
}
variable "vsphere_folder_path" {
  default = ""
}

locals {
  node_name = "1191repro"
  disk-list = flatten([
    for disk in range(1, var.additional_disks + 1, 1) : [
      for node in range(1, var.swarm_count + 1, 1) : {
        node = node
        disk = disk
      }
    ]
  ])
}


data "vsphere_datacenter" "dc" {
  name = var.vsphere_datacenter
}
data "vsphere_datastore" "datastore" {
  name          = var.vsphere_datastore
  datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_resource_pool" "pool" {
  name          = var.vsphere_resource_pool
  datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_network" "network" {
  name          = var.vsphere_network
  datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_compute_cluster" "cluster" {
  name          = var.vsphere_cluster
  datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_virtual_machine" "template" {
  name          = "${var.vsphere_template}-${var.environment}"
  datacenter_id = data.vsphere_datacenter.dc.id
}

resource "vsphere_virtual_machine" "vm" {
  count            = var.swarm_count
  depends_on       = [time_sleep.disk_wait, vsphere_virtual_disk.additional_disk]
  name             = "1191repro"
  resource_pool_id = data.vsphere_resource_pool.pool.id
  datastore_id     = data.vsphere_datastore.datastore.id
  folder           = var.vsphere_folder_path

  num_cpus                   = 1
  memory                     = 1024
  guest_id                   = data.vsphere_virtual_machine.template.guest_id
  firmware                   = data.vsphere_virtual_machine.template.firmware
  scsi_type                  = data.vsphere_virtual_machine.template.scsi_type
  sync_time_with_host        = true
  wait_for_guest_ip_timeout  = 300
  shutdown_wait_timeout      = 1
  force_power_off            = true

  disk {
    label            = "bootdisk"
    size             = data.vsphere_virtual_machine.template.disks.0.size > var.disk_size ? data.vsphere_virtual_machine.template.disks.0.size : var.disk_size
    eagerly_scrub    = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub
    thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned
  }

  dynamic "disk" {
    for_each = range(var.additional_disks)

    content {
      path         = "test/${var.environment}/${local.node_name}${count.index + 1}/disk${local.disk-list[(disk.value * var.swarm_count)].disk}.vmdk"
      label        = "${local.node_name}${count.index + 1}-disk${local.disk-list[(disk.value * var.swarm_count)].disk}"
      attach       = true
      unit_number  = local.disk-list[(disk.value * var.swarm_count)].disk
      datastore_id = data.vsphere_datastore.datastore.id
    }
  }

  network_interface {
    network_id = data.vsphere_network.network.id
  }

  clone {
    template_uuid = data.vsphere_virtual_machine.template.id
  }
}

resource "time_sleep" "disk_wait" {
  depends_on       = [vsphere_virtual_disk.additional_disk]
  destroy_duration = "20s"
}

resource "vsphere_virtual_disk" "additional_disk" {
  count              = var.additional_disks
  size               = var.additional_disks_size
  vmdk_path          = "test/${var.environment}/${local.node_name}${local.disk-list[count.index].node}/disk${local.disk-list[count.index].disk}.vmdk"
  datacenter         = var.vsphere_datacenter
  datastore          = var.vsphere_datastore
  type               = "thin"
  create_directories = true
}

@tenthirtyam Sincere apologies, I haven't had the chance to go back to my team and try to get a repro for this yet. Will do as soon as I can! 👌

I took a quick look at the linked PR, @tenthirtyam. I'm not sure it's appropriate to exit early if no disk type can be read. Why does this attribute fail to be queried? Do disks not always have a disk type? I worry that merging that PR will cause state drift.

@appilon, the change in #1447 seems pretty innocuous to me: it logs a warning rather than erroring out. What are your thoughts?

Ryan

@bill-rich @pryorda @anupugalavat Are you able to review my colleague's fix for this in #1447? Our workaround at the moment is to pin the vsphere provider to 1.21.0.
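
For anyone else hitting this, a minimal pin on the last working release could look like the sketch below. This assumes the Terraform 0.13+ required_providers syntax used in the repro config above; on 0.12 a version constraint in the provider block works as well.

terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      # Pin to the last release before the regression; drop the pin once a
      # release containing the fix from #1447 is available.
      version = "= 1.21.0"
    }
  }
}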