
Change worker managed instance group to span zones in region

* Change Google Cloud module to require the `region` variable
* Workers are created in random zones within the given region
* Tolerate Google Cloud zone failures or capacity issues
* If workers are preempted (when enabled), replacement instances can
be drawn from any zone in the region, which should avoid the scheduling
issues that were previously possible when a single zone aggressively
preempted instances (presumably due to Google Cloud capacity)
Author: Dalton Hubble
Date:   2017-11-04 10:57:12 -07:00
Parent: e32885c9cd
Commit: 6300383b43
8 changed files with 26 additions and 11 deletions
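Before the file-by-file diff, a minimal sketch of what the change means for a cluster definition. The module name, source, and values mirror the documentation examples in this commit; the inline comments are editorial annotations rather than part of the docs.

module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes"

  # Google Cloud
  region        = "us-central1"    # newly required; worker instances are spread across this region's zones
  zone          = "us-central1-c"  # still required; conventionally a zone within the chosen region
  dns_zone      = "example.com"
  dns_zone_name = "example-zone"

  # ... remaining variables unchanged ...
}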

@@ -4,6 +4,11 @@ Notable changes between versions.
 ## Latest
+#### Google Cloud
+* Add required variable `region`
+* Change worker managed instance group to automatically span zones in a region
 ## v1.8.2
 * Kubernetes v1.8.2

@@ -46,6 +46,7 @@ module "google-cloud-yavin" {
   source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes"
   # Google Cloud
+  region = "us-central1"
   zone = "us-central1-c"
   dns_zone = "example.com"
   dns_zone_name = "example-zone"

@@ -77,6 +77,7 @@ module "google-cloud-yavin" {
   source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes"
   # Google Cloud
+  region = "us-central1"
   zone = "us-central1-c"
   dns_zone = "example.com"
   dns_zone_name = "example-zone"
@@ -196,6 +197,7 @@ Learn about [version pinning](concepts.md#versioning), maintenance, and [addons]
 | Name | Description | Example |
 |:-----|:------------|:--------|
 | cluster_name | Unique cluster name (prepended to dns_zone) | "yavin" |
+| region | Google Cloud region | "us-central1" |
 | zone | Google Cloud zone | "us-central1-f" |
 | dns_zone | Google Cloud DNS zone | "google-cloud.example.com" |
 | dns_zone_name | Google Cloud DNS zone name | "example-zone" |

@@ -46,6 +46,7 @@ module "google-cloud-yavin" {
   source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes"
   # Google Cloud
+  region = "us-central1"
   zone = "us-central1-c"
   dns_zone = "example.com"
   dns_zone_name = "example-zone"

@@ -29,8 +29,8 @@ module "workers" {
   # GCE
   network = "${google_compute_network.network.name}"
+  region = "${var.region}"
   count = "${var.worker_count}"
-  zone = "${var.zone}"
   machine_type = "${var.machine_type}"
   os_image = "${var.os_image}"
   preemptible = "${var.worker_preemptible}"

@@ -3,9 +3,14 @@ variable "cluster_name" {
   description = "Cluster name"
 }
+variable "region" {
+  type = "string"
+  description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
+}
 variable "zone" {
   type = "string"
-  description = "Google Cloud zone (e.g. us-central1-f, see `gcloud compute zones list`)"
+  description = "Google Cloud Zone (e.g. us-central1-f, see `gcloud compute zones list`)"
 }
 variable "dns_zone" {

@@ -20,9 +20,9 @@ variable "count" {
   description = "Number of worker compute instances the instance group should manage"
 }
-variable "zone" {
+variable "region" {
   type = "string"
-  description = "Google zone that compute instances in the group should be created in (e.g. gcloud compute zones list)"
+  description = "Google Cloud region to create a regional managed group of workers (e.g. us-central1, see `gcloud compute regions list`)."
 }
 variable "machine_type" {

@@ -1,16 +1,17 @@
-# Managed Instance Group
-resource "google_compute_instance_group_manager" "workers" {
+# Regional managed instance group maintains a homogeneous set of workers that
+# span the zones in the region.
+resource "google_compute_region_instance_group_manager" "workers" {
   name = "${var.cluster_name}-worker-group"
   description = "Compute instance group of ${var.cluster_name} workers"
-  # Instance name prefix for instances in the group
+  # instance name prefix for instances in the group
   base_instance_name = "${var.cluster_name}-worker"
   instance_template = "${google_compute_instance_template.worker.self_link}"
   update_strategy = "RESTART"
-  zone = "${var.zone}"
-  target_size = "${var.count}"
+  region = "${var.region}"
 
-  # Target pool instances in the group should be added into
+  target_size = "${var.count}"
+  # target pool to which instances in the group should be added
   target_pools = [
     "${google_compute_target_pool.workers.self_link}",
   ]