# tf provider declaration
terraform {
  # required_version = ">= 0.13"
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.6.3"
    }
  }
}

# provider URI for libvirt
provider "libvirt" {
  uri = "qemu:///system"
}

# server settings are defined in terraform.tfvars
# the variables here are the defaults in case no relevant terraform.tfvars setting is found
variable "projectname" {
  type    = string
  default = "vms"
}

# map of VMs to create; each host may optionally carry its own
# "sourceimage" and "category" keys to override the file-level
# var.sourceimage / var.category defaults below
variable "hosts" {
  default = {
    "vm1" = {
      name     = "vm1",
      vcpu     = 1,
      memory   = "1024",
      diskpool = "default",
      disksize = "4000000000",
      mac      = "12:12:00:12:12:00",
    },
  }
}

variable "baseimagediskpool" {
  type    = string
  default = "default"
}

variable "domainname" {
  type    = string
  default = "domain.local"
}

variable "networkname" {
  type    = string
  default = "default"
}

variable "sourceimage" {
  type    = string
  default = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
}

variable "category" {
  type    = string
  default = "host"
}

# base OS image
resource "libvirt_volume" "baseosimage" {
  for_each = var.hosts

  # if desired, base image could be specified for a category of devices sharing
  # the base image using name = "baseosimage_${var.projectname}.${each.value.category}"
  name = "baseosimage_${var.projectname}.${each.value.name}"
  # FIX: fall back to var.sourceimage when a host entry does not define its own
  # "sourceimage" key — the default var.hosts map has no such key, so the
  # previous each.value.sourceimage reference failed without a tfvars override
  source = lookup(each.value, "sourceimage", var.sourceimage)
  pool   = var.baseimagediskpool
}

# vdisk creation
# vdisks are cloned from the base image for each of the "hosts"
resource "libvirt_volume" "qcow2_volume" {
  for_each = var.hosts

  name = "${each.value.name}.qcow2"
  # let each baseos image have a name after the vm using it
  # FIX: index by each.key — baseosimage instances are keyed by the var.hosts
  # map keys, which are not guaranteed to equal a host's "name" attribute
  base_volume_id = libvirt_volume.baseosimage[each.key].id
  pool           = each.value.diskpool
  # currently a hard constraint to only use qcow2, could become settable
  format = "qcow2"
  size   = each.value.disksize
}

# Use cloudinit config file
# pass certain vars to cloudinit
data "template_file" "user_data" {
  for_each = var.hosts

  # FIX: fall back to var.category when a host entry does not define a
  # "category" key (the default var.hosts map does not)
  template = file("${path.module}/cloudinit.${lookup(each.value, "category", var.category)}.cfg")
  vars = {
    hostname   = each.value.name
    domainname = var.domainname
  }
}

# cloudinit magic
resource "libvirt_cloudinit_disk" "commoninit" {
  for_each = var.hosts

  name      = "commoninit_${each.value.name}.iso"
  user_data = data.template_file.user_data[each.key].rendered
}

# net-lab domains loop
resource "libvirt_domain" "net-lab" {
  for_each = var.hosts

  name = each.value.name
  cpu = {
    mode = "host-passthrough"
  }
  vcpu   = each.value.vcpu
  memory = each.value.memory

  network_interface {
    network_name = var.networkname
    mac          = each.value.mac
    # do not wait for a lease if networkname == "host-bridge"
    wait_for_lease = var.networkname == "host-bridge" ? false : true
  }

  disk {
    # FIX: a for_each instance is a single object, not a list — the legacy
    # splat plus element(..., 1) form was invalid here; reference the id
    # of this host's volume directly
    volume_id = libvirt_volume.qcow2_volume[each.key].id
  }

  # FIX: index by each.key for consistency with how the commoninit
  # instances are keyed (for_each = var.hosts)
  cloudinit = libvirt_cloudinit_disk.commoninit[each.key].id
}

output "hostnames" {
  # FIX: the .* splat does not apply to for_each (map-typed) resources;
  # iterate the map values to return the list of created domain names
  value = [for vm in libvirt_domain.net-lab : vm.name]
}