# tf-ubuntu-lineageos/vms/main.tf
# tf provider declaration
terraform {
  # required_version = ">= 0.13"
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.6.10"
    }
  }
}
# provider URI for libvirt
provider "libvirt" {
  uri = "qemu:///system"
}
# server settings are defined in terraform.tfvars
# the variables below are the defaults used when no matching terraform.tfvars
# setting is found (see the example sketch after the "hosts" variable)
variable "projectname" {
  type    = string
  default = "vms"
}
variable "hosts" {
  default = {
    "vm1" = {
      name     = "vm1",
      vcpu     = 1,
      memory   = "1024",
      diskpool = "default",
      disksize = 1024 * 1024 * 1024 * 4, # 4 GiB
      # locally administered unicast MAC; the original 13:13:00:... had the
      # multicast bit set, which libvirt rejects for domain interfaces
      mac = "52:54:00:13:13:00",
      # per-host base image, read as each.value.sourceimage below
      sourceimage = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img",
      # extra NICs for the dynamic "network_interface" block; none by default
      network = [],
    },
  }
}
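# A minimal terraform.tfvars sketch for the map above (hypothetical values,
# grounded only in the fields this config actually reads); the "builder" name
# matches the ansible inventory written by the provisioner further down:
#
#   hosts = {
#     "builder" = {
#       name        = "builder",
#       vcpu        = 4,
#       memory      = "8192",
#       diskpool    = "default",
#       disksize    = 42949672960, # 40 GiB
#       mac         = "52:54:00:00:13:37",
#       sourceimage = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img",
#       network     = [],
#     },
#   }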
variable "baseimagediskpool" {
type = string
default = "default"
}
variable "domainname" {
type = string
default = "domain.local"
}
variable "networkname" {
type = string
default = "default"
}
variable "sourceimage" {
type = string
# default = "https://cloud-images.ubuntu.com/focal/20210702/focal-server-cloudimg-amd64-disk-kvm.img"
default = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
}
variable "category" {
type = string
default = "host"
}
variable "subnets" {
type = list(string)
}
variable "network_names" {
type = list(string)
}
variable "dhcp" {
type = list(bool)
}
variable "mode" {
type = list(string)
}
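# These lists have no defaults, so terraform.tfvars must set them. A sketch
# with made-up values defining one additional NAT network with DHCP enabled:
#
#   subnets       = ["10.13.0.0/24"]
#   network_names = ["losnet"]
#   dhcp          = [true]
#   mode          = ["nat"]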
#tfsec:ignore:GEN001 - tf-ansible key is solely used for connections by ansible
variable "ssh_private_key" {
  description = "the key to use for ansible provisioning"
  default     = "~/.ssh/tf-ansible"
}
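# The tf-ansible keypair is assumed to exist already; if it does not, it can
# be created with, e.g. (a sketch, not part of this config):
#
#   ssh-keygen -t ed25519 -f ~/.ssh/tf-ansible -C tf-ansible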
# base OS image
resource "libvirt_volume" "baseosimage" {
  for_each = var.hosts
  # if desired, one base image could be shared per category of devices by
  # using name = "baseosimage_${var.projectname}.${each.value.category}"
  name   = "baseosimage_${var.projectname}.${each.value.name}"
  source = each.value.sourceimage
  pool   = var.baseimagediskpool
}
# vdisk creation
# vdisks are cloned from the base image for each of the "hosts"
resource "libvirt_volume" "qcow2_volume" {
  for_each = var.hosts
  name     = "${each.value.name}.img"
  # baseosimage is keyed by the same hosts map, so look it up via each.key
  base_volume_id = libvirt_volume.baseosimage[each.key].id
  pool           = each.value.diskpool
  # currently a hard constraint to only use qcow2, could become settable
  format = "qcow2"
  size   = each.value.disksize
}
# Use cloudinit config file
# pass certain vars to cloudinit
data "template_file" "user_data" {
  for_each = var.hosts
  template = file("${path.module}/cloudinit.${each.value.name}.cfg")
  vars = {
    hostname   = each.value.name
    domainname = var.domainname
  }
}
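# Each host needs a cloudinit.<name>.cfg next to this file; the vars above are
# interpolated into it. A minimal hypothetical cloudinit.vm1.cfg granting the
# ansible user access with the tf-ansible key:
#
#   #cloud-config
#   hostname: ${hostname}
#   fqdn: ${hostname}.${domainname}
#   users:
#     - name: ansible
#       ssh_authorized_keys:
#         - <contents of ~/.ssh/tf-ansible.pub>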
# cloudinit magic
resource "libvirt_cloudinit_disk" "commoninit" {
  for_each  = var.hosts
  name      = "commoninit_${each.value.name}.iso"
  user_data = data.template_file.user_data[each.key].rendered
}
resource "libvirt_network" "additional_networks" {
count = length(var.subnets)
name = var.network_names[count.index]
bridge = "br_${var.network_names[count.index]}"
mode = var.mode[count.index]
addresses = ["${var.subnets[count.index]}"]
dhcp {
enabled = var.dhcp[count.index]
}
}
# LineageOS-builder lab domains loop
resource "libvirt_domain" "losbuilder-lab" {
  for_each = var.hosts
  name     = each.value.name
  cpu = {
    mode = "host-passthrough"
  }
  vcpu   = each.value.vcpu
  memory = each.value.memory
  network_interface {
    network_name = var.networkname
    mac          = each.value.mac
    # do not wait for a DHCP lease when networkname is a host bridge ("br..."),
    # since libvirt cannot observe leases on bridged interfaces
    wait_for_lease = substr(var.networkname, 0, 2) != "br"
  }
dynamic "network_interface" {
for_each = each.value.network
content {
network_name = network_interface.value["name"]
mac = network_interface.value["mac"]
addresses = network_interface.value["address"]
wait_for_lease = network_interface.value["dhcp"]
}
}
  disk {
    # for_each yields exactly one volume per host, so reference it directly
    volume_id = libvirt_volume.qcow2_volume[each.key].id
  }
  cloudinit = libvirt_cloudinit_disk.commoninit[each.key].id
provisioner "local-exec" {
command = <<EOF
echo "[losbuilder-lab]" > ${path.module}/../hosts
echo builder >> ${path.module}/../hosts
echo "[losbuilder-lab:vars]" >> ${path.module}/../hosts
ssh -i${var.ssh_private_key} -T ansible@builder || sleep 60
cd ..; ansible-playbook -u ansible --private-key ${var.ssh_private_key} -i ${path.module}/hosts playbooks/losbuilder-lab.yml
EOF
}
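  # For reference, the commands above leave an ansible inventory like:
  #
  #   [losbuilder-lab]
  #   builder
  #   [losbuilder-lab:vars]
  #
  # which assumes "builder" is resolvable from the host running terraform
  # (e.g. via libvirt's DNS or an /etc/hosts entry).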
provisioner "local-exec" {
command = "rm -v ${path.module}/../hosts"
when = destroy
}
}
output "hostnames" {
value = [libvirt_domain.losbuilder-lab.*]
}