
Commit 7860019

fix: prevent failure when planning network creation concurrently
1 parent 3424339 commit 7860019

File tree

21 files changed: +171 additions, −175 deletions


CONTRIBUTING.md

Lines changed: 11 additions & 5 deletions
@@ -19,12 +19,17 @@ must be refreshed if the module interfaces are changed.
 
 ## Templating
 
-To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory.
+To more cleanly handle cases where desired functionality would require complex
+duplication of Terraform resources (e.g. [PR
+51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)),
+this repository is largely generated from the [`autogen`](/autogen) directory.
 
-The root module is generated by running `make build`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate.
+The root module is generated by running `make build`. Changes to this
+repository should be made in the [`autogen`](/autogen) directory where
+appropriate.
 
-Note: The correct sequence to update the repo using autogen functionality is to run
-`make build`. This will create the various Terraform files, and then
+Note: The correct sequence to update the repo using autogen functionality is to
+run `make build`. This will create the various Terraform files, and then
 generate the Terraform documentation using `terraform-docs`.
 
 ### Autogeneration of documentation from .tf files
@@ -56,7 +61,8 @@ Six test-kitchen instances are defined:
 - `simple-zonal`
 - `stub-domains`
 
-The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory.`
+The test-kitchen instances in `test/fixtures/` wrap identically-named examples
+in the `examples/` directory.
 
 ### Test Environment
 The easiest way to test the module is in an isolated test project. The

autogen/main/firewall.tf.tmpl

Lines changed: 5 additions & 5 deletions
@@ -34,11 +34,11 @@ resource "google_compute_firewall" "intra_egress" {
   direction = "EGRESS"
 
   target_tags        = [local.cluster_network_tag]
-  destination_ranges = [
+  destination_ranges = compact([
     local.cluster_endpoint_for_nodes,
     local.cluster_subnet_cidr,
-    local.cluster_alias_ranges_cidr[var.ip_range_pods],
-  ]
+    lookup(local.cluster_alias_ranges_cidr, var.ip_range_pods, null),
+  ])
 
   # Allow all possible protocols
   allow { protocol = "tcp" }
@@ -134,7 +134,7 @@ resource "google_compute_firewall" "master_webhooks" {
   traffic flow between the managed firewall rules
 *****************************************/
 resource "google_compute_firewall" "shadow_allow_pods" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && can(local.cluster_alias_ranges_cidr[var.ip_range_pods]) ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-all"
   description = "Managed by terraform gke module: A shadow firewall rule to match the default rule allowing pod communication."
@@ -183,7 +183,7 @@ resource "google_compute_firewall" "shadow_allow_master" {
 }
 
 resource "google_compute_firewall" "shadow_allow_nodes" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && local.cluster_subnet_cidr != null ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-vms"
   description = "Managed by Terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes communication."
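
The change above (mirrored in the generated firewall.tf copies further down) covers the case where the pods secondary range is not yet known at plan time, for example when the subnetwork is being created in the same concurrent plan: a direct index into local.cluster_alias_ranges_cidr fails the plan, while lookup() with a null default plus compact() simply drops the missing entry, and can() turns the same failing index into a boolean that gates the shadow rule's count. A minimal standalone sketch of that behavior (not module code; the range name and CIDR are invented, and it assumes a recent Terraform where compact() removes null as well as empty strings):

locals {
  alias_ranges = {}     # imagine the secondary ranges are still empty/unknown at plan time
  pods_range   = "pods" # hypothetical secondary range name

  # Old pattern: local.alias_ranges[local.pods_range] would abort the plan.
  # New pattern: lookup() falls back to null and compact() drops it.
  destination_ranges = compact([
    "10.0.0.0/20",
    lookup(local.alias_ranges, local.pods_range, null),
  ])

  # can() converts the failing index into false, which now gates the
  # shadow_allow_pods count instead of raising an error.
  create_shadow_pods_rule = can(local.alias_ranges[local.pods_range])
}

output "destination_ranges" {
  value = local.destination_ranges # ["10.0.0.0/20"]
}

output "create_shadow_pods_rule" {
  value = local.create_shadow_pods_rule # false
}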

autogen/main/main.tf.tmpl

Lines changed: 13 additions & 14 deletions
@@ -20,30 +20,34 @@
   Get available zones in region
 *****************************************/
 data "google_compute_zones" "available" {
+  count = var.zones == [] ? 1 : 0
 {% if beta_cluster %}
   provider = google-beta
 {% else %}
   provider = google
 {% endif %}
 
   project = var.project_id
-  region  = local.region
+  region  = var.region
 }
 
 resource "random_shuffle" "available_zones" {
-  input        = data.google_compute_zones.available.names
+  count        = var.zones == [] ? 1 : 0
+  input        = one(data.google_compute_zones.available[*].names)
   result_count = 3
 }
 
 locals {
   // ID of the cluster
   cluster_id = google_container_cluster.primary.id
+  // Zone(s) of the cluster
+  zones = var.zones == [] ? sort(one(random_shuffle.available_zones[*].result)) : var.zones
 
   // location
   location = var.regional ? var.region : var.zones[0]
-  region   = var.regional ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
+  region   = var.region != null ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
   // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted
-  node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones))
+  node_locations = var.regional ? local.zones : slice(local.zones, 1, length(local.zones))
   // Kubernetes version
   master_version_regional = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.region.latest_master_version
   master_version_zonal    = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version
@@ -74,7 +78,6 @@ locals {
   custom_kube_dns_config      = length(keys(var.stub_domains)) > 0
   upstream_nameservers_config = length(var.upstream_nameservers) > 0
   network_project_id          = var.network_project_id != "" ? var.network_project_id : var.project_id
-  zone_count                  = length(var.zones)
   cluster_type                = var.regional ? "regional" : "zonal"
   // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous.
 {% if beta_cluster %}
@@ -85,7 +88,7 @@ locals {
 {% endif %}
 
   cluster_subnet_cidr       = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null
-  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
+  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules && data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range != null ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
 
 {% if autopilot_cluster != true %}
   cluster_network_policy = var.network_policy ? [{
@@ -129,7 +132,7 @@ locals {
 
   cluster_output_name           = google_container_cluster.primary.name
   cluster_output_regional_zones = google_container_cluster.primary.node_locations
-  cluster_output_zonal_zones    = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : []
+  cluster_output_zonal_zones    = local.zones
   cluster_output_zones          = local.cluster_output_regional_zones
 
 {% if private_cluster %}
@@ -170,11 +173,11 @@ locals {
     [for np in google_container_node_pool.pools : np.name], [""],
     [for np in google_container_node_pool.windows_pools : np.name], [""]
   )
-
+
   cluster_output_node_pools_versions = merge(
     { for np in google_container_node_pool.pools : np.name => np.version },
     { for np in google_container_node_pool.windows_pools : np.name => np.version },
-  )
+  )
 {% endif %}
 
   cluster_master_auth_list_layer1 = local.cluster_output_master_auth
@@ -236,10 +239,6 @@ data "google_container_engine_versions" "region" {
 }
 
 data "google_container_engine_versions" "zone" {
-  // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error
-  //
-  // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone.
-  //
-  location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0]
+  location = local.zones[0]
   project  = var.project_id
 }
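
In the template above, the zone lookup is now created only when var.zones is empty, local.zones replaces the old zone_count bookkeeping, and node_locations plus the versions data source read from it. A stripped-down sketch of the conditional-count and one() pattern in isolation (not module code; the project and region values are placeholders, and the emptiness check is written as length(var.zones) == 0 here for clarity):

variable "zones" {
  type    = list(string)
  default = []
}

# Query available zones only when the caller did not supply any.
data "google_compute_zones" "available" {
  count   = length(var.zones) == 0 ? 1 : 0
  project = "my-project"  # placeholder
  region  = "us-central1" # placeholder
}

resource "random_shuffle" "available_zones" {
  count        = length(var.zones) == 0 ? 1 : 0
  input        = one(data.google_compute_zones.available[*].names)
  result_count = 3
}

locals {
  # one() collapses the zero-or-one instance splat to a single value (or null);
  # caller-supplied zones win when present.
  zones = length(var.zones) == 0 ? sort(one(random_shuffle.available_zones[*].result)) : var.zones
}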

firewall.tf

Lines changed: 5 additions & 5 deletions
@@ -34,11 +34,11 @@ resource "google_compute_firewall" "intra_egress" {
   direction = "EGRESS"
 
   target_tags        = [local.cluster_network_tag]
-  destination_ranges = [
+  destination_ranges = compact([
     local.cluster_endpoint_for_nodes,
     local.cluster_subnet_cidr,
-    local.cluster_alias_ranges_cidr[var.ip_range_pods],
-  ]
+    lookup(local.cluster_alias_ranges_cidr, var.ip_range_pods, null),
+  ])
 
   # Allow all possible protocols
   allow { protocol = "tcp" }
@@ -90,7 +90,7 @@ resource "google_compute_firewall" "master_webhooks" {
   traffic flow between the managed firewall rules
 *****************************************/
 resource "google_compute_firewall" "shadow_allow_pods" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && can(local.cluster_alias_ranges_cidr[var.ip_range_pods]) ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-all"
   description = "Managed by terraform gke module: A shadow firewall rule to match the default rule allowing pod communication."
@@ -139,7 +139,7 @@ resource "google_compute_firewall" "shadow_allow_master" {
 }
 
 resource "google_compute_firewall" "shadow_allow_nodes" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && local.cluster_subnet_cidr != null ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-vms"
   description = "Managed by Terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes communication."

main.tf

Lines changed: 11 additions & 12 deletions
@@ -20,26 +20,30 @@
   Get available zones in region
 *****************************************/
 data "google_compute_zones" "available" {
+  count = var.zones == [] ? 1 : 0
   provider = google
 
   project = var.project_id
-  region  = local.region
+  region  = var.region
 }
 
 resource "random_shuffle" "available_zones" {
-  input        = data.google_compute_zones.available.names
+  count        = var.zones == [] ? 1 : 0
+  input        = one(data.google_compute_zones.available[*].names)
   result_count = 3
 }
 
 locals {
   // ID of the cluster
   cluster_id = google_container_cluster.primary.id
+  // Zone(s) of the cluster
+  zones = var.zones == [] ? sort(one(random_shuffle.available_zones[*].result)) : var.zones
 
   // location
   location = var.regional ? var.region : var.zones[0]
-  region   = var.regional ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
+  region   = var.region != null ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
   // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted
-  node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones))
+  node_locations = var.regional ? local.zones : slice(local.zones, 1, length(local.zones))
   // Kubernetes version
   master_version_regional = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.region.latest_master_version
   master_version_zonal    = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version
@@ -66,13 +70,12 @@ locals {
   custom_kube_dns_config      = length(keys(var.stub_domains)) > 0
   upstream_nameservers_config = length(var.upstream_nameservers) > 0
   network_project_id          = var.network_project_id != "" ? var.network_project_id : var.project_id
-  zone_count                  = length(var.zones)
   cluster_type                = var.regional ? "regional" : "zonal"
   // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous.
   default_auto_upgrade = var.regional ? true : false
 
   cluster_subnet_cidr       = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null
-  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
+  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules && data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range != null ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
 
   cluster_network_policy = var.network_policy ? [{
     enabled = true
@@ -95,7 +98,7 @@ locals {
 
   cluster_output_name           = google_container_cluster.primary.name
   cluster_output_regional_zones = google_container_cluster.primary.node_locations
-  cluster_output_zonal_zones    = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : []
+  cluster_output_zonal_zones    = local.zones
   cluster_output_zones          = local.cluster_output_regional_zones
 
   cluster_endpoint = google_container_cluster.primary.endpoint
@@ -167,10 +170,6 @@ data "google_container_engine_versions" "region" {
 }
 
 data "google_container_engine_versions" "zone" {
-  // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error
-  //
-  // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone.
-  //
-  location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0]
+  location = local.zones[0]
   project  = var.project_id
 }

modules/beta-autopilot-private-cluster/firewall.tf

Lines changed: 5 additions & 5 deletions
@@ -34,11 +34,11 @@ resource "google_compute_firewall" "intra_egress" {
   direction = "EGRESS"
 
   target_tags        = [local.cluster_network_tag]
-  destination_ranges = [
+  destination_ranges = compact([
     local.cluster_endpoint_for_nodes,
     local.cluster_subnet_cidr,
-    local.cluster_alias_ranges_cidr[var.ip_range_pods],
-  ]
+    lookup(local.cluster_alias_ranges_cidr, var.ip_range_pods, null),
+  ])
 
   # Allow all possible protocols
   allow { protocol = "tcp" }
@@ -117,7 +117,7 @@ resource "google_compute_firewall" "master_webhooks" {
   traffic flow between the managed firewall rules
 *****************************************/
 resource "google_compute_firewall" "shadow_allow_pods" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && can(local.cluster_alias_ranges_cidr[var.ip_range_pods]) ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-all"
   description = "Managed by terraform gke module: A shadow firewall rule to match the default rule allowing pod communication."
@@ -166,7 +166,7 @@ resource "google_compute_firewall" "shadow_allow_master" {
 }
 
 resource "google_compute_firewall" "shadow_allow_nodes" {
-  count = var.add_shadow_firewall_rules ? 1 : 0
+  count = var.add_shadow_firewall_rules && local.cluster_subnet_cidr != null ? 1 : 0
 
   name        = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-vms"
   description = "Managed by Terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes communication."

modules/beta-autopilot-private-cluster/main.tf

Lines changed: 11 additions & 12 deletions
@@ -20,26 +20,30 @@
   Get available zones in region
 *****************************************/
 data "google_compute_zones" "available" {
+  count = var.zones == [] ? 1 : 0
   provider = google-beta
 
   project = var.project_id
-  region  = local.region
+  region  = var.region
 }
 
 resource "random_shuffle" "available_zones" {
-  input        = data.google_compute_zones.available.names
+  count        = var.zones == [] ? 1 : 0
+  input        = one(data.google_compute_zones.available[*].names)
   result_count = 3
 }
 
 locals {
   // ID of the cluster
   cluster_id = google_container_cluster.primary.id
+  // Zone(s) of the cluster
+  zones = var.zones == [] ? sort(one(random_shuffle.available_zones[*].result)) : var.zones
 
   // location
   location = var.regional ? var.region : var.zones[0]
-  region   = var.regional ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
+  region   = var.region != null ? var.region : join("-", slice(split("-", var.zones[0]), 0, 2))
   // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted
-  node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones))
+  node_locations = var.regional ? local.zones : slice(local.zones, 1, length(local.zones))
   // Kubernetes version
   master_version_regional = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.region.latest_master_version
   master_version_zonal    = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version
@@ -52,14 +56,13 @@ locals {
   custom_kube_dns_config      = length(keys(var.stub_domains)) > 0
   upstream_nameservers_config = length(var.upstream_nameservers) > 0
   network_project_id          = var.network_project_id != "" ? var.network_project_id : var.project_id
-  zone_count                  = length(var.zones)
   cluster_type                = var.regional ? "regional" : "zonal"
   // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous.
   // When a release channel is used, node auto-upgrade are enabled and cannot be disabled.
   default_auto_upgrade = var.regional || var.release_channel != null ? true : false
 
   cluster_subnet_cidr       = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null
-  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
+  cluster_alias_ranges_cidr = var.add_cluster_firewall_rules && data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range != null ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {}
 
 
   cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{
@@ -69,7 +72,7 @@ locals {
 
   cluster_output_name           = google_container_cluster.primary.name
   cluster_output_regional_zones = google_container_cluster.primary.node_locations
-  cluster_output_zonal_zones    = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : []
+  cluster_output_zonal_zones    = local.zones
   cluster_output_zones          = local.cluster_output_regional_zones
 
   cluster_endpoint = (var.enable_private_nodes && length(google_container_cluster.primary.private_cluster_config) > 0) ? (var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.private_cluster_config.0.public_endpoint) : google_container_cluster.primary.endpoint
@@ -145,10 +148,6 @@ data "google_container_engine_versions" "region" {
 }
 
 data "google_container_engine_versions" "zone" {
-  // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error
-  //
-  // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone.
-  //
-  location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0]
+  location = local.zones[0]
   project  = var.project_id
 }
