tf-defender/vms/main.tf

# tf provider declaration
terraform {
# required_providers with a source address needs terraform >= 0.13
required_version = ">= 0.13"
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = "0.6.3"
}
}
}
# provider URI for libvirt
provider "libvirt" {
uri = "qemu:///system"
}
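# note: qemu:///system talks to the system-wide libvirt daemon, so the user
# running terraform typically needs to be in the libvirt group (or use sudo)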
# server settings are defined in terraform.tfvars;
# the variables below only provide defaults for settings terraform.tfvars does not override
variable "projectname" {
type = string
default = "vms"
}
variable "hosts" {
default = {
"vm1" = {
name = "vm1",
vcpu = 1,
memory = "1024",
diskpool = "default",
disksize = "4000000000",
mac = "12:12:00:12:12:00",
# sourceimage, category and network are also dereferenced per host further
# down, so every host entry must carry them; the values here mirror the
# defaults of var.sourceimage and var.category, and an empty network list
# means no additional interfaces
sourceimage = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2",
category = "host",
network = [],
},
}
}
variable "baseimagediskpool" {
type = string
default = "default"
}
variable "domainname" {
type = string
default = "domain.local"
}
variable "networkname" {
type = string
default = "default"
}
variable "sourceimage" {
type = string
default = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
}
variable "category" {
type = string
default = "host"
}
variable "subnets" {
type = list(string)
}
variable "network_names" {
type = list(string)
}
variable "dhcp" {
type = list(bool)
}
variable "ssh_private_key" {
description = "the key to use for ansible stuff"
default = "~/.ssh/tf-ansible"
}
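# A minimal terraform.tfvars sketch for reference; every value below is
# illustrative (assumed names, MACs, addresses and sizes), not a shipped default:
#
#   projectname   = "defender-lab"
#   hosts = {
#     "vm1" = {
#       name        = "vm1"
#       vcpu        = 2
#       memory      = "2048"
#       diskpool    = "default"
#       disksize    = "8000000000"
#       mac         = "12:12:00:12:12:01"
#       sourceimage = "https://download.fedoraproject.org/pub/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
#       category    = "host"
#       network     = [{ name = "lab0", mac = "12:12:00:12:12:02", address = ["10.10.10.10"], mode = "route" }]
#     }
#   }
#   subnets       = ["10.10.10.0/24"]
#   network_names = ["lab0"]
#   dhcp          = [true]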
# base OS image
resource "libvirt_volume" "baseosimage" {
for_each = var.hosts
# if desired, base image could be specified for a category of devices sharing
# the base image using name = "baseosimage_${var.projectname}.${each.value.category}"
name = "baseosimage_${var.projectname}.${each.value.name}"
source = each.value.sourceimage
pool = var.baseimagediskpool
}
# vdisk creation
# vdisks are cloned from the base image for each of the "hosts"
resource "libvirt_volume" "qcow2_volume" {
for_each = var.hosts
name = "${each.value.name}.qcow2"
# each vdisk is backed by the base image created for its vm above; both
# resources iterate over var.hosts, so the map key lines up
base_volume_id = libvirt_volume.baseosimage[each.key].id
pool = each.value.diskpool
# currently a hard constraint to only use qcow2, could become settable
format = "qcow2"
size = each.value.disksize
}
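# note: with base_volume_id set, the provider creates the qcow2 volume as a
# copy-on-write clone, so all vm disks share the read-only base image above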
# Use cloudinit config file
# pass certain vars to cloudinit
data "template_file" "user_data" {
for_each = var.hosts
template = file("${path.module}/cloudinit.${each.value.category}.cfg")
vars = {
hostname = each.value.name
domainname = var.domainname
}
}
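# A sketch of what cloudinit.host.cfg (the template for the default "host"
# category) could contain; the file is not part of this listing, so everything
# here is assumed except the ${hostname} and ${domainname} placeholders filled
# in by the vars map above:
#
#   #cloud-config
#   fqdn: ${hostname}.${domainname}
#   users:
#     - name: ansible
#       ssh_authorized_keys:
#         - <public key matching var.ssh_private_key>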
# cloudinit magic
resource "libvirt_cloudinit_disk" "commoninit" {
for_each = var.hosts
name = "commoninit_${each.value.name}.iso"
user_data = data.template_file.user_data[each.key].rendered
}
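# additional routed networks; var.subnets, var.network_names and var.dhcp are
# parallel lists, so index i of each list describes the same network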
resource "libvirt_network" "additional_networks" {
count = length(var.subnets)
name = var.network_names[count.index]
# bridge = "br_${var.network_names[count.index]}"
mode = "route"
addresses = [var.subnets[count.index]]
dhcp {
enabled = var.dhcp[count.index]
}
}
# net-lab domains loop
resource "libvirt_domain" "defender-lab" {
for_each = var.hosts
name = each.value.name
cpu = {
mode = "host-passthrough"
}
vcpu = each.value.vcpu
memory = each.value.memory
network_interface {
network_name = var.networkname
mac = each.value.mac
# do not wait for a lease if networkname == "host-bridge"
wait_for_lease = var.networkname != "host-bridge"
}
dynamic "network_interface" {
for_each = each.value.network
content {
network_name = network_interface.value.name
mac = network_interface.value.mac
addresses = network_interface.value.address
# bridged interfaces never get a lease from libvirt, so do not wait
wait_for_lease = network_interface.value.mode != "bridge"
}
}
disk {
# qcow2_volume uses for_each, so its instances are addressed by map key
volume_id = libvirt_volume.qcow2_volume[each.key].id
}
cloudinit = libvirt_cloudinit_disk.commoninit[each.key].id
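# after boot, run the defender playbook against the new vm via a throwaway
# inventory; h_defender is assumed to resolve through ssh config or /etc/hosts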
provisioner "local-exec" {
command = <<EOF
cd ..
echo "[defender]" > d_hosts
echo h_defender >> d_hosts
echo "[defender:vars]" >> d_hosts
ansible-playbook -u ansible --private-key ${var.ssh_private_key} -i d_hosts playbooks/defender.yml
EOF
}
}
output "hostnames" {
value = [libvirt_domain.defender-lab.*]
}