
Rerun terraform fmt

Dalton Hubble committed 9d4cbb38f6 on 2018-05-01 21:41:22 -07:00 (parent cc29530ba0)
8 changed files with 50 additions and 41 deletions
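`terraform fmt` rewrites `*.tf` files in the working directory into Terraform's canonical style; its most visible effect is padding the `=` of consecutive attribute assignments so they align on the longest name in the run. A minimal before/after sketch of the kind of rewrite this commit applies (a hypothetical resource name, not lines copied from the diff):

# Before: `=` signs in a run of consecutive attributes are unaligned.
resource "matchbox_profile" "example" {
  count = "${length(var.worker_names)}"
  name = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
}

# After `terraform fmt`: each `=` is padded to the longest attribute name in the run.
resource "matchbox_profile" "example" {
  count = "${length(var.worker_names)}"
  name  = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
}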


@@ -1,7 +1,7 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=911f4115088b7511f29221f64bf8e93bfa9ee567"

  cluster_name = "${var.cluster_name}"
  api_servers  = ["${var.k8s_domain_name}"]
  etcd_servers = ["${var.controller_domains}"]


@@ -1,6 +1,6 @@
locals {
  default_assets_endpoint = "${var.matchbox_http_endpoint}/assets/fedora/27"
  atomic_assets_endpoint  = "${var.atomic_assets_endpoint != "" ? var.atomic_assets_endpoint : local.default_assets_endpoint}"
}

// Cached Fedora Install profile (from matchbox /assets cache)
@@ -36,14 +36,15 @@ data "template_file" "install-kickstarts" {
  vars {
    matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
    atomic_assets_endpoint = "${local.atomic_assets_endpoint}"
    mac                    = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }
}

// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
  count = "${length(var.controller_names)}"
  name  = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
}
@@ -65,8 +66,9 @@ data "template_file" "controller-configs" {
// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
  count = "${length(var.worker_names)}"
  name  = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
}


@@ -11,8 +11,9 @@ variable "matchbox_http_endpoint" {
}

variable "atomic_assets_endpoint" {
  type        = "string"
  default     = ""
  description = <<EOD
HTTP endpoint serving the Fedora Atomic Host vmlinuz, initrd, os repo, and ostree repo (e.g. `http://example.com/some/path`).


@@ -46,7 +46,7 @@ resource "digitalocean_droplet" "controllers" {
  user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
  ssh_keys  = ["${var.ssh_fingerprints}"]

  tags = [
    "${digitalocean_tag.controllers.id}",
  ]


@@ -14,43 +14,45 @@ resource "google_dns_record_set" "apiserver" {
# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "apiserver-ipv4" {
  name       = "${var.cluster_name}-apiserver-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  ip_address  = "${google_compute_global_address.apiserver-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.apiserver.self_link}"
}

# Global TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
  name            = "${var.cluster_name}-apiserver"
  description     = "Distribute TCP load across ${var.cluster_name} controllers"
  backend_service = "${google_compute_backend_service.apiserver.self_link}"
}

# Global backend service backed by unmanaged instance groups
resource "google_compute_backend_service" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  description = "${var.cluster_name} apiserver service"
  protocol    = "TCP"
  port_name   = "apiserver"

  session_affinity = "NONE"
  timeout_sec      = "60"

  # controller(s) spread across zonal instance groups
  backend {
    group = "${google_compute_instance_group.controllers.0.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.1.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.2.self_link}"
  }

@@ -74,22 +76,22 @@ resource "google_compute_instance_group" "controllers" {
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
    google_compute_instance.controllers.*.zone,
    list(element(local.zones, count.index)))}",
  ]
}

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}
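The `instances` expression above pairs each controller's `self_link` with its zone and keeps only the links whose zone matches this group's zone. A minimal standalone sketch of the Terraform 0.11-era `matchkeys(values, keys, searchset)` semantics, using hypothetical values and an illustrative output name:

# matchkeys returns values[i] for each index i where keys[i] appears in searchset.
output "links_in_zone_b" {
  value = "${matchkeys(list("link-a", "link-b1", "link-b2"),
           list("us-central1-a", "us-central1-b", "us-central1-b"),
           list("us-central1-b"))}"

  # => ["link-b1", "link-b2"]
}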


@@ -23,6 +23,7 @@ locals {
  # TCP proxy load balancers require a fixed number of zonal backends. Spread
  # controllers over up to 3 zones, since all GCP regions have at least 3.
  zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"

  controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.assigned_nat_ip}"]
}
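The comment above captures the constraint: the backend service declares exactly three `backend` blocks, so the zone list must be pinned to exactly three entries. A small sketch of how `slice` does that, assuming a hypothetical region with four zones (`all_names` and `three_zones` are illustrative names, not from the diff):

locals {
  # A region such as us-central1 can report four zones.
  all_names = ["us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"]

  # slice(list, from, to) keeps indexes [from, to): here the first three zones.
  three_zones = "${slice(local.all_names, 0, 3)}"
}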


@@ -14,43 +14,45 @@ resource "google_dns_record_set" "apiserver" {
# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "apiserver-ipv4" {
  name       = "${var.cluster_name}-apiserver-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  ip_address  = "${google_compute_global_address.apiserver-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.apiserver.self_link}"
}

# TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
  name            = "${var.cluster_name}-apiserver"
  description     = "Distribute TCP load across ${var.cluster_name} controllers"
  backend_service = "${google_compute_backend_service.apiserver.self_link}"
}

# Backend service backed by unmanaged instance groups
resource "google_compute_backend_service" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  description = "${var.cluster_name} apiserver service"
  protocol    = "TCP"
  port_name   = "apiserver"

  session_affinity = "NONE"
  timeout_sec      = "60"

  # controller(s) spread across zonal instance groups
  backend {
    group = "${google_compute_instance_group.controllers.0.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.1.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.2.self_link}"
  }

@@ -74,22 +76,22 @@ resource "google_compute_instance_group" "controllers" {
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
    google_compute_instance.controllers.*.zone,
    list(element(local.zones, count.index)))}",
  ]
}

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}


@@ -23,6 +23,7 @@ locals {
  # TCP proxy load balancers require a fixed number of zonal backends. Spread
  # controllers over up to 3 zones, since all GCP regions have at least 3.
  zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"

  controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.assigned_nat_ip}"]
}