# tf provider declaration
terraform {
  # required_version = ">= 0.13"
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.6.3"
    }
  }
}

# provider URI for libvirt
provider "libvirt" {
  uri = "qemu:///system"
}

# server settings are defined in terraform.tfvars
# the variables here are the defaults in case no relevant terraform.tfvars setting is found
variable "projectname" {
  type    = string
  default = "vms"
}

variable "hosts" {
  default = {
    "vm1" = {
      name     = "vm1",
      vcpu     = 1,
      memory   = "1024",
      diskpool = "default",
      disksize = "4000000000",
      mac      = "12:12:00:12:12:00",
    },
  }
}

variable "baseimagediskpool" {
  type    = string
  default = "default"
}

variable "domainname" {
  type    = string
  default = "domain.local"
}

variable "networkname" {
  type    = string
  default = "default"
}

variable "sourceimage" {
  type    = string
  default = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
}

variable "category" {
  type    = string
  default = "host"
}

variable "subnets" {
  type = list(string)
}

variable "network_names" {
  type = list(string)
}

variable "dhcp" {
  type = list(bool)
}

variable "ssh_private_key" {
  description = "the key to use for ansible stuff"
  default     = "~/.ssh/tf-ansible"
}
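# Illustrative terraform.tfvars sketch (not part of the original configuration).
# The resources below expect each "hosts" entry to also carry category, sourceimage
# and a "network" list of objects with name, mac, address and mode keys; only the
# key names are taken from the code, the host name, MACs and addresses here are
# placeholders.
#
# subnets       = ["10.0.10.0/24"]
# network_names = ["transit"]
# dhcp          = [false]
#
# hosts = {
#   "h_defender" = {
#     name        = "h_defender"
#     vcpu        = 1
#     memory      = "1024"
#     diskpool    = "default"
#     disksize    = "4000000000"
#     mac         = "12:12:00:12:12:01"
#     category    = "host"
#     sourceimage = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
#     network = [
#       {
#         name    = "transit"
#         mac     = "12:12:00:12:12:02"
#         address = ["10.0.10.10"]
#         mode    = "route"
#       }
#     ]
#   }
# }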
# base OS image
resource "libvirt_volume" "baseosimage" {
  for_each = var.hosts
  # if desired, base image could be specified for a category of devices sharing
  # the base image using name = "baseosimage_${var.projectname}.${each.value.category}"
  name   = "baseosimage_${var.projectname}.${each.value.name}"
  source = each.value.sourceimage
  pool   = var.baseimagediskpool
}

# vdisk creation
# vdisks are cloned from the base image for each of the "hosts"
resource "libvirt_volume" "qcow2_volume" {
  for_each = var.hosts
  name     = "${each.value.name}.qcow2"
  # let each baseos image have a name after the vm using it
  base_volume_id = libvirt_volume.baseosimage[each.value.name].id
  pool           = each.value.diskpool
  # currently a hard constraint to only use qcow2, could become settable
  format = "qcow2"
  size   = each.value.disksize
}

# Use cloudinit config file
# pass certain vars to cloudinit
data "template_file" "user_data" {
  for_each = var.hosts
  template = file("${path.module}/cloudinit.${each.value.category}.cfg")
  vars = {
    hostname   = each.value.name
    domainname = var.domainname
  }
}

# cloudinit magic
resource "libvirt_cloudinit_disk" "commoninit" {
  for_each  = var.hosts
  name      = "commoninit_${each.value.name}.iso"
  user_data = data.template_file.user_data[each.key].rendered
}

resource "libvirt_network" "additional_networks" {
  count     = length(var.subnets)
  name      = var.network_names[count.index]
  bridge    = "br_${var.network_names[count.index]}"
  mode      = var.dhcp[count.index] == true ? "bridge" : "route"
  addresses = [var.subnets[count.index]]
  dhcp {
    enabled = var.dhcp[count.index]
  }
}

# net-lab domains loop
resource "libvirt_domain" "net-lab" {
  for_each = var.hosts
  name     = each.value.name
  cpu = {
    mode = "host-passthrough"
  }
  vcpu   = each.value.vcpu
  memory = each.value.memory

  # primary interface on the management network
  network_interface {
    network_name = var.networkname
    mac          = each.value.mac
    # do not wait for a lease if networkname == "host-bridge"
    wait_for_lease = substr(var.networkname, 0, 2) == "br" ? true : false
  }

  # one additional interface per entry in the host's "network" list
  dynamic "network_interface" {
    for_each = each.value.network
    content {
      network_name   = network_interface.value["name"]
      mac            = network_interface.value["mac"]
      addresses      = network_interface.value["address"]
      wait_for_lease = substr(network_interface.value.mode, 0, 2) == "br" ? true : false
    }
  }

  disk {
    volume_id = libvirt_volume.qcow2_volume[each.key].id
  }

  cloudinit = libvirt_cloudinit_disk.commoninit[each.value.name].id

  # write a static Ansible inventory and run the lab playbook once the VMs are up
  provisioner "local-exec" {
    command = <<EOF
echo "[defender]" > ${path.module}/../hosts
echo h_defender >> ${path.module}/../hosts
echo "[defender:vars]" >> ${path.module}/../hosts
echo "[router]" >> ${path.module}/../hosts
echo r_upstream >> ${path.module}/../hosts
echo r_edge >> ${path.module}/../hosts
echo "[router:vars]" >> ${path.module}/../hosts
echo "[attacker]" >> ${path.module}/../hosts
echo h_attacker >> ${path.module}/../hosts
echo "[attacker:vars]" >> ${path.module}/../hosts
echo "[victim]" >> ${path.module}/../hosts
echo h_victim >> ${path.module}/../hosts
echo "[victim:vars]" >> ${path.module}/../hosts
sleep 200
cd ..; ansible-playbook -u ansible --private-key ${var.ssh_private_key} -i ${path.module}/hosts playbooks/lab.yml
EOF
  }

  # clean up the generated inventory on destroy
  provisioner "local-exec" {
    command = "rm -v ${path.module}/hosts"
    when    = destroy
  }
}

output "hostnames" {
  value = [for vm in libvirt_domain.net-lab : vm.name]
}
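# For reference, the local-exec provisioner above generates an Ansible inventory
# with the following layout (group and host names taken from the echo commands;
# the *:vars sections are left empty):
#
# [defender]
# h_defender
# [defender:vars]
# [router]
# r_upstream
# r_edge
# [router:vars]
# [attacker]
# h_attacker
# [attacker:vars]
# [victim]
# h_victim
# [victim:vars]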