diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
index 73449e5ea125..c7efecac2e5a 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
@@ -87,6 +87,12 @@ func resourceKubernetesClusterNodePool() *schema.Resource {
 				Optional: true,
 			},
 
+			"enable_host_encryption": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+			},
+
 			"enable_node_public_ip": {
 				Type:     schema.TypeBool,
 				Optional: true,
@@ -285,16 +291,18 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf
 	spotMaxPrice := d.Get("spot_max_price").(float64)
 	t := d.Get("tags").(map[string]interface{})
 	vmSize := d.Get("vm_size").(string)
+	enableHostEncryption := d.Get("enable_host_encryption").(bool)
 
 	profile := containerservice.ManagedClusterAgentPoolProfileProperties{
-		OsType:             containerservice.OSType(osType),
-		EnableAutoScaling:  utils.Bool(enableAutoScaling),
-		EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)),
-		Mode:               mode,
-		ScaleSetPriority:   containerservice.ScaleSetPriority(priority),
-		Tags:               tags.Expand(t),
-		Type:               containerservice.VirtualMachineScaleSets,
-		VMSize:             containerservice.VMSizeTypes(vmSize),
+		OsType:                 containerservice.OSType(osType),
+		EnableAutoScaling:      utils.Bool(enableAutoScaling),
+		EnableNodePublicIP:     utils.Bool(d.Get("enable_node_public_ip").(bool)),
+		Mode:                   mode,
+		ScaleSetPriority:       containerservice.ScaleSetPriority(priority),
+		Tags:                   tags.Expand(t),
+		Type:                   containerservice.VirtualMachineScaleSets,
+		VMSize:                 containerservice.VMSizeTypes(vmSize),
+		EnableEncryptionAtHost: utils.Bool(enableHostEncryption),
 
 		// this must always be sent during creation, but is optional for auto-scaled clusters during update
 		Count: utils.Int32(int32(count)),
@@ -593,6 +601,7 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac
 
 	d.Set("enable_auto_scaling", props.EnableAutoScaling)
 	d.Set("enable_node_public_ip", props.EnableNodePublicIP)
+	d.Set("enable_host_encryption", props.EnableEncryptionAtHost)
 
 	evictionPolicy := ""
 	if props.ScaleSetEvictionPolicy != "" {
diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
index 894ebc522a4a..9c49689d7cc1 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go
@@ -47,6 +47,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
 	"windows":         testAccKubernetesClusterNodePool_windows,
 	"windowsAndLinux": testAccKubernetesClusterNodePool_windowsAndLinux,
 	"zeroSize":        testAccKubernetesClusterNodePool_zeroSize,
+	"hostEncryption":  testAccKubernetesClusterNodePool_hostEncryption,
 }
 
 func TestAccKubernetesClusterNodePool_autoScale(t *testing.T) {
@@ -706,6 +707,26 @@ func testAccKubernetesClusterNodePool_zeroSize(t *testing.T) {
 	})
 }
 
+func TestAccKubernetesClusterNodePool_hostEncryption(t *testing.T) {
+	checkIfShouldRunTestsIndividually(t)
+	testAccKubernetesClusterNodePool_hostEncryption(t)
+}
+
+func testAccKubernetesClusterNodePool_hostEncryption(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
+	r := KubernetesClusterNodePoolResource{}
+
+	data.ResourceTest(t, r, 
[]resource.TestStep{
+		{
+			Config: r.hostEncryption(data),
+			Check: resource.ComposeTestCheckFunc(
+				check.That(data.ResourceName).ExistsInAzure(r),
+				check.That(data.ResourceName).Key("enable_host_encryption").HasValue("true"),
+			),
+		},
+	})
+}
+
 func TestAccKubernetesClusterNodePool_maxSize(t *testing.T) {
 	checkIfShouldRunTestsIndividually(t)
 	testAccKubernetesClusterNodePool_maxSize(t)
@@ -1539,6 +1560,24 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
 }
 `, r.templateConfig(data))
 }
+
+func (r KubernetesClusterNodePoolResource) hostEncryption(data acceptance.TestData) string {
+	return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+
+%s
+
+resource "azurerm_kubernetes_cluster_node_pool" "test" {
+  name                   = "internal"
+  kubernetes_cluster_id  = azurerm_kubernetes_cluster.test.id
+  vm_size                = "Standard_DS2_v2"
+  enable_host_encryption = true
+  node_count             = 1
+}
+`, r.templateConfig(data))
+}
 
 func (r KubernetesClusterNodePoolResource) maxSizeConfig(data acceptance.TestData) string {
 	return fmt.Sprintf(`
 provider "azurerm" {
diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go
index 8a987d445d87..dbc1fa6a01d5 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go
@@ -6,8 +6,10 @@ import (
 	"net/http"
 	"testing"
 
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/terraform"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" @@ -86,3 +88,55 @@ func (KubernetesClusterResource) updateDefaultNodePoolAgentCount(nodeCount int) return nil } } + +func TestAccKubernetesCluster_hostEncryption(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_hostEncryption(t) +} + +func testAccKubernetesCluster_hostEncryption(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.hostEncryption(data, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("default_node_pool.0.enable_host_encryption").HasValue("true"), + ), + }, + }) +} + +func (KubernetesClusterResource) hostEncryption(data acceptance.TestData, controlPlaneVersion string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = %q + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + enable_host_encryption = true + } + + identity { + type = "SystemAssigned" + } +} + `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion) +} diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 7dbd483b0312..fce9ffce596a 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -68,6 +68,12 @@ func SchemaDefaultNodePool() *schema.Schema 
{ ForceNew: true, }, + "enable_host_encryption": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "max_count": { Type: schema.TypeInt, Optional: true, @@ -206,13 +212,14 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC t := raw["tags"].(map[string]interface{}) profile := containerservice.ManagedClusterAgentPoolProfile{ - EnableAutoScaling: utils.Bool(enableAutoScaling), - EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), - Name: utils.String(raw["name"].(string)), - NodeLabels: nodeLabels, - Tags: tags.Expand(t), - Type: containerservice.AgentPoolType(raw["type"].(string)), - VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)), + EnableAutoScaling: utils.Bool(enableAutoScaling), + EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), + EnableEncryptionAtHost: utils.Bool(raw["enable_host_encryption"].(bool)), + Name: utils.String(raw["name"].(string)), + NodeLabels: nodeLabels, + Tags: tags.Expand(t), + Type: containerservice.AgentPoolType(raw["type"].(string)), + VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)), // at this time the default node pool has to be Linux or the AKS cluster fails to provision with: // Pods not in Running status: coredns-7fc597cc45-v5z7x,coredns-autoscaler-7ccc76bfbd-djl7j,metrics-server-cbd95f966-5rl97,tunnelfront-7d9884977b-wpbvn @@ -344,6 +351,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro enableNodePublicIP = *agentPool.EnableNodePublicIP } + enableHostEncryption := false + if agentPool.EnableEncryptionAtHost != nil { + enableHostEncryption = *agentPool.EnableEncryptionAtHost + } + maxCount := 0 if agentPool.MaxCount != nil { maxCount = int(*agentPool.MaxCount) @@ -402,6 +414,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "availability_zones": availabilityZones, "enable_auto_scaling": enableAutoScaling, "enable_node_public_ip": enableNodePublicIP, 
+ "enable_host_encryption": enableHostEncryption, "max_count": maxCount, "max_pods": maxPods, "min_count": minCount, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index fabce7afcf43..90955bb7a816 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -229,6 +229,8 @@ A `default_node_pool` block supports the following: -> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. +* `enable_host_encryption` - (Optional) Should the nodes in the Default Node Pool have host encryption enabled? Defaults to `false`. + * `enable_node_public_ip` - (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to `false`. * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index 00d9010ce1e3..9b29bba1f95c 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -73,6 +73,8 @@ The following arguments are supported: * `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Defaults to `false`. +* `enable_host_encryption` - (Optional) Should the nodes in this Node Pool have host encryption enabled? Defaults to `false`. + ~> **NOTE:** Additional fields must be configured depending on the value of this field - see below. * `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? Defaults to `false`.