Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

azurerm_hdinsight_kafka_cluster: support rest proxy #8064

Merged
18 changes: 18 additions & 0 deletions azurerm/internal/clients/azuread/aad_client.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package azuread

import (
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common"
)

// Client is the container for the Azure Active Directory Graph API
// clients used by the provider.
type Client struct {
// GroupsClient talks to the AAD Graph RBAC Groups API.
GroupsClient *graphrbac.GroupsClient
}

// NewClient constructs the Azure AD Client from the supplied options,
// pointing the Graph RBAC groups client at the configured graph endpoint
// and tenant, and attaching the graph authorizer.
func NewClient(o *common.ClientOptions) *Client {
	groupsClient := graphrbac.NewGroupsClientWithBaseURI(o.GraphEndpoint, o.TenantID)
	o.ConfigureClient(&groupsClient.Client, o.GraphAuthorizer)

	client := Client{
		GroupsClient: &groupsClient,
	}
	return &client
}
4 changes: 4 additions & 0 deletions azurerm/internal/clients/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ package clients
import (
"context"

"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients/azuread"

"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common"
Expand Down Expand Up @@ -110,6 +112,7 @@ type Client struct {
Attestation *attestation.Client
Authorization *authorization.Client
Automation *automation.Client
AzureAD *azuread.Client
AzureStackHCI *azureStackHCI.Client
Batch *batch.Client
Blueprints *blueprints.Client
Expand Down Expand Up @@ -207,6 +210,7 @@ func (client *Client) Build(ctx context.Context, o *common.ClientOptions) error
client.Attestation = attestation.NewClient(o)
client.Authorization = authorization.NewClient(o)
client.Automation = automation.NewClient(o)
client.AzureAD = azuread.NewClient(o)
client.AzureStackHCI = azureStackHCI.NewClient(o)
client.Batch = batch.NewClient(o)
client.Blueprints = blueprints.NewClient(o)
Expand Down
33 changes: 28 additions & 5 deletions azurerm/internal/services/hdinsight/common_hdinsight.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,10 +173,11 @@ func hdinsightClusterDelete(clusterKind string) schema.DeleteFunc {
}

type hdInsightRoleDefinition struct {
HeadNodeDef HDInsightNodeDefinition
WorkerNodeDef HDInsightNodeDefinition
ZookeeperNodeDef HDInsightNodeDefinition
EdgeNodeDef *HDInsightNodeDefinition
HeadNodeDef HDInsightNodeDefinition
WorkerNodeDef HDInsightNodeDefinition
ZookeeperNodeDef HDInsightNodeDefinition
KafkaManagementNodeDef *HDInsightNodeDefinition
EdgeNodeDef *HDInsightNodeDefinition
}

func expandHDInsightRoles(input []interface{}, definition hdInsightRoleDefinition) (*[]hdinsight.Role, error) {
Expand Down Expand Up @@ -215,6 +216,18 @@ func expandHDInsightRoles(input []interface{}, definition hdInsightRoleDefinitio
roles = append(roles, *edgeNode)
}

if definition.KafkaManagementNodeDef != nil {
kafkaManagementNodeRaw := v["kafka_management_node"].([]interface{})
// "kafka_management_node" is optional, we expand it only when user has specified it.
if len(kafkaManagementNodeRaw) != 0 {
kafkaManagementNode, err := ExpandHDInsightNodeDefinition("kafkamanagementnode", kafkaManagementNodeRaw, *definition.KafkaManagementNodeDef)
if err != nil {
return nil, fmt.Errorf("Error expanding `kafka_management_node`: %+v", err)
}
roles = append(roles, *kafkaManagementNode)
}
}

return &roles, nil
}

Expand All @@ -223,7 +236,7 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi
return []interface{}{}
}

var existingEdgeNodes, existingHeadNodes, existingWorkerNodes, existingZookeeperNodes []interface{}
var existingKafkaManagementNodes, existingEdgeNodes, existingHeadNodes, existingWorkerNodes, existingZookeeperNodes []interface{}

existingVs := d.Get("roles").([]interface{})
if len(existingVs) > 0 {
Expand All @@ -233,6 +246,10 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi
existingEdgeNodes = existingV["edge_node"].([]interface{})
}

if definition.KafkaManagementNodeDef != nil {
existingKafkaManagementNodes = existingV["kafka_management_node"].([]interface{})
}

existingHeadNodes = existingV["head_node"].([]interface{})
existingWorkerNodes = existingV["worker_node"].([]interface{})
existingZookeeperNodes = existingV["zookeeper_node"].([]interface{})
Expand All @@ -259,6 +276,12 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi
result["edge_node"] = edgeNodes
}

if definition.KafkaManagementNodeDef != nil {
kafkaManagementNode := FindHDInsightRole(input.Roles, "kafkamanagementnode")
kafkaManagementNodes := FlattenHDInsightNodeDefinition(kafkaManagementNode, existingKafkaManagementNodes, *definition.KafkaManagementNodeDef)
result["kafka_management_node"] = kafkaManagementNodes
}

return []interface{}{
result,
}
Expand Down
9 changes: 6 additions & 3 deletions azurerm/internal/services/hdinsight/hdinsight.go
Original file line number Diff line number Diff line change
Expand Up @@ -674,7 +674,7 @@ func ValidateSchemaHDInsightNodeDefinitionVMSize() schema.SchemaValidateFunc {
}, true)
}

func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition) *schema.Schema {
func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition, required bool) *schema.Schema {
result := map[string]*schema.Schema{
"vm_size": {
Type: schema.TypeString,
Expand Down Expand Up @@ -754,14 +754,17 @@ func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNo
}
}

return &schema.Schema{
s := &schema.Schema{
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Required: required,
Optional: !required,
Elem: &schema.Resource{
Schema: result,
},
}

return s
}

func ExpandHDInsightNodeDefinition(name string, input []interface{}, definition HDInsightNodeDefinition) (*hdinsight.Role, error) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,11 @@ func dataSourceHDInsightSparkCluster() *schema.Resource {
Computed: true,
},

"kafka_rest_proxy_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"ssh_endpoint": {
Type: schema.TypeString,
Computed: true,
Expand Down Expand Up @@ -150,6 +155,8 @@ func dataSourceHDInsightClusterRead(d *schema.ResourceData, meta interface{}) er
d.Set("https_endpoint", httpEndpoint)
sshEndpoint := FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints)
d.Set("ssh_endpoint", sshEndpoint)
kafkaRestProxyEndpoint := FindHDInsightConnectivityEndpoint("KafkaRestProxyPublicEndpoint", props.ConnectivityEndpoints)
d.Set("kafka_rest_proxy_endpoint", kafkaRestProxyEndpoint)
}

return tags.FlattenAndSet(d, resp.Tags)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,24 @@ func TestAccDataSourceHDInsightCluster_kafka(t *testing.T) {
})
}

// TestAccDataSourceHDInsightCluster_kafkaWithRestProxy checks that the
// cluster data source exposes the Kafka REST proxy endpoint when reading
// a kafka cluster provisioned with the REST proxy enabled.
func TestAccDataSourceHDInsightCluster_kafkaWithRestProxy(t *testing.T) {
	data := acceptance.BuildTestData(t, "data.azurerm_hdinsight_cluster", "test")
	r := HDInsightClusterDataSourceResource{}

	steps := []resource.TestStep{
		{
			Config: r.kafkaWithRestProxy(data),
			Check: resource.ComposeTestCheckFunc(
				check.That(data.ResourceName).Key("kind").HasValue("kafka"),
				check.That(data.ResourceName).Key("tier").HasValue("standard"),
				// a kafka cluster has no edge node, so the edge SSH endpoint is empty
				check.That(data.ResourceName).Key("edge_ssh_endpoint").HasValue(""),
				check.That(data.ResourceName).Key("https_endpoint").Exists(),
				check.That(data.ResourceName).Key("ssh_endpoint").Exists(),
				check.That(data.ResourceName).Key("kafka_rest_proxy_endpoint").Exists(),
			),
		},
	}
	data.DataSourceTest(t, steps)
}

func TestAccDataSourceHDInsightCluster_mlServices(t *testing.T) {
data := acceptance.BuildTestData(t, "data.azurerm_hdinsight_cluster", "test")
r := HDInsightClusterDataSourceResource{}
Expand Down Expand Up @@ -193,6 +211,17 @@ data "azurerm_hdinsight_cluster" "test" {
`, HDInsightKafkaClusterResource{}.basic(data))
}

// kafkaWithRestProxy returns an acceptance-test configuration that reads
// the kafka cluster (created with REST proxy enabled by the referenced
// resource template) through the cluster data source.
func (HDInsightClusterDataSourceResource) kafkaWithRestProxy(data acceptance.TestData) string {
	template := HDInsightKafkaClusterResource{}.restProxy(data)
	return fmt.Sprintf(`
%s

data "azurerm_hdinsight_cluster" "test" {
name = azurerm_hdinsight_kafka_cluster.test.name
resource_group_name = azurerm_hdinsight_kafka_cluster.test.resource_group_name
}
`, template)
}

func (HDInsightClusterDataSourceResource) mlServices(data acceptance.TestData) string {
return fmt.Sprintf(`
%s
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -105,11 +105,11 @@ func resourceHDInsightHadoopCluster() *schema.Resource {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightHadoopClusterHeadNodeDefinition),
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightHadoopClusterHeadNodeDefinition, true),

"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHadoopClusterWorkerNodeDefinition),
"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHadoopClusterWorkerNodeDefinition, true),

"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHadoopClusterZookeeperNodeDefinition),
"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHadoopClusterZookeeperNodeDefinition, true),

"edge_node": {
Type: schema.TypeList,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,11 +100,11 @@ func resourceHDInsightHBaseCluster() *schema.Resource {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightHBaseClusterHeadNodeDefinition),
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightHBaseClusterHeadNodeDefinition, true),

"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHBaseClusterWorkerNodeDefinition),
"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHBaseClusterWorkerNodeDefinition, true),

"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHBaseClusterZookeeperNodeDefinition),
"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHBaseClusterZookeeperNodeDefinition, true),
},
},
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,11 +100,11 @@ func resourceHDInsightInteractiveQueryCluster() *schema.Resource {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightInteractiveQueryClusterHeadNodeDefinition),
"head_node": SchemaHDInsightNodeDefinition("roles.0.head_node", hdInsightInteractiveQueryClusterHeadNodeDefinition, true),

"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightInteractiveQueryClusterWorkerNodeDefinition),
"worker_node": SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightInteractiveQueryClusterWorkerNodeDefinition, true),

"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightInteractiveQueryClusterZookeeperNodeDefinition),
"zookeeper_node": SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightInteractiveQueryClusterZookeeperNodeDefinition, true),
},
},
},
Expand Down
Loading