3 changes: 3 additions & 0 deletions .changelog/2919.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/tencentcloud_emr_cluster: Support multi_disks.
```
187 changes: 169 additions & 18 deletions tencentcloud/services/emr/extension_emr.go
@@ -1,9 +1,12 @@
package emr

import (
"fmt"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
emr "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/emr/v20190103"
"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
)

const (
@@ -35,38 +38,123 @@ func buildResourceSpecSchema() *schema.Schema {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"spec": {Type: schema.TypeString, Optional: true},
"storage_type": {Type: schema.TypeInt, Optional: true},
"disk_type": {Type: schema.TypeString, Optional: true},
"mem_size": {Type: schema.TypeInt, Optional: true},
"cpu": {Type: schema.TypeInt, Optional: true},
"disk_size": {Type: schema.TypeInt, Optional: true},
"root_size": {Type: schema.TypeInt, Optional: true},
"spec": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "Node specification description, such as CVM.SA2.",
},
"storage_type": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Storage type. Value range:\n" +
" - 4: Represents cloud SSD;\n" +
" - 5: Represents efficient cloud disk;\n" +
" - 6: Represents enhanced SSD Cloud Block Storage;\n" +
" - 11: Represents throughput Cloud Block Storage;\n" +
" - 12: Represents extremely fast SSD Cloud Block Storage.",
},
"disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "disk types. Value range:\n" +
" - CLOUD_SSD: Represents cloud SSD;\n" +
" - CLOUD_PREMIUM: Represents efficient cloud disk;\n" +
" - CLOUD_BASIC: Represents Cloud Block Storage.",
},
"mem_size": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Memory size in M.",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Number of CPU cores.",
},
"disk_size": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Data disk capacity.",
},
"root_size": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Root disk capacity.",
},
"multi_disks": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
ForceNew: true,
Description: "Cloud disk list. When the data disk is a cloud disk, use disk_type and disk_size parameters directly, and use multi_disks for excess parts.",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "Cloud disk type\n" +
" - CLOUD_SSD: Represents cloud SSD;\n" +
" - CLOUD_PREMIUM: Represents efficient cloud disk;\n" +
" - CLOUD_HSSD: Represents enhanced SSD Cloud Block Storage.",
},
"volume": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Cloud disk size.",
},
"count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Number of cloud disks of this type.",
},
},
},
Set: func(v interface{}) int {
m := v.(map[string]interface{})
return helper.HashString(fmt.Sprintf("%s-%d-%d", m["disk_type"].(string), m["volume"].(int), m["count"].(int)))
},
},
},
},
Description: "Resource details.",
}
}
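
The custom `Set` function above hashes `disk_type`, `volume`, and `count` together, so two configured `multi_disks` blocks that are identical in all three fields collapse into a single set element. A minimal sketch of that behavior, assuming a hypothetical helper placed inside this `emr` package (it is not part of this PR):

```go
package emr

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
)

// exampleMultiDiskSet demonstrates how the multi_disks hash deduplicates entries.
func exampleMultiDiskSet() {
	hash := func(v interface{}) int {
		m := v.(map[string]interface{})
		return helper.HashString(fmt.Sprintf("%s-%d-%d", m["disk_type"].(string), m["volume"].(int), m["count"].(int)))
	}
	set := schema.NewSet(hash, []interface{}{
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 2},
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 2}, // exact duplicate, absorbed by the set
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 3}, // differs in count, kept
	})
	fmt.Println(set.Len()) // prints 2
}
```

Because `count` is part of the hash, two entries that differ only in `count` survive as separate elements; catching that case is left to the `validateMultiDisks` check added further down.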

func ParseMultiDisks(_multiDisks []map[string]interface{}) []*emr.MultiDisk {
multiDisks := make([]*emr.MultiDisk, len(_multiDisks))
for _, item := range _multiDisks {
func ParseMultiDisks(_multiDisks []interface{}) []*emr.MultiDisk {
multiDisks := make([]*emr.MultiDisk, 0, len(_multiDisks))
for _, multiDisk := range _multiDisks {
item := multiDisk.(map[string]interface{})
var diskType string
var volume int64
var count int64
var volume int
var count int
for subK, subV := range item {
if subK == "disk_type" {
diskType = subV.(string)
} else if subK == "volume" {
volume = subV.(int64)
volume = subV.(int)
} else if subK == "count" {
count = subV.(int64)
count = subV.(int)
}
}
multiDisks = append(multiDisks,
&emr.MultiDisk{
DiskType: common.StringPtr(diskType),
Volume: common.Int64Ptr(volume),
Count: common.Int64Ptr(count),
DiskType: helper.String(diskType),
Volume: helper.IntInt64(volume),
Count: helper.IntInt64(count),
})
}
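
With the schema field now a `schema.TypeSet`, `ParseMultiDisks` receives the raw `[]interface{}` produced by `Set.List()` and turns each entry into an `*emr.MultiDisk` with pointer fields for the API request. A rough sketch of the conversion, written as a hypothetical test inside this package (not part of this PR):

```go
package emr

import (
	"fmt"
	"testing"
)

// TestParseMultiDisksSketch shows the shape of the input and output of ParseMultiDisks.
func TestParseMultiDisksSketch(t *testing.T) {
	raw := []interface{}{ // what multi_disks looks like after (*schema.Set).List()
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 2},
		map[string]interface{}{"disk_type": "CLOUD_PREMIUM", "volume": 500, "count": 1},
	}
	for _, d := range ParseMultiDisks(raw) {
		fmt.Printf("type=%s volume=%d count=%d\n", *d.DiskType, *d.Volume, *d.Count)
	}
	// prints:
	// type=CLOUD_HSSD volume=200 count=2
	// type=CLOUD_PREMIUM volume=500 count=1
}
```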

@@ -101,7 +189,7 @@ func ParseResource(_resource map[string]interface{}) *emr.Resource {
} else if k == "root_size" {
resultResource.RootSize = common.Int64Ptr((int64)(v.(int)))
} else if k == "multi_disks" {
multiDisks := v.([]map[string]interface{})
multiDisks := v.(*schema.Set).List()
resultResource.MultiDisks = ParseMultiDisks(multiDisks)
} else if k == "tags" {
tags := v.([]map[string]string)
@@ -116,3 +204,66 @@ func ParseResource(_resource map[string]interface{}) *emr.Resource {
}
return resultResource
}

func validateMultiDisks(r map[string]interface{}) error {
if _, ok := r["multi_disks"]; !ok {
return nil
}
multiDiskList := r["multi_disks"].(*schema.Set).List()
visited := make(map[string]struct{})

for _, multiDisk := range multiDiskList {
multiDiskMap := multiDisk.(map[string]interface{})
key := fmt.Sprintf("%s-%d", multiDiskMap["disk_type"].(string), multiDiskMap["volume"].(int))
if _, ok := visited[key]; ok {
return fmt.Errorf("multi_disks entries with the same disk_type and volume must be merged into a single entry")
}
visited[key] = struct{}{}
}

return nil
}
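
validateMultiDisks rejects configurations in which two `multi_disks` entries share the same `disk_type` and `volume`; such disks are expected to be merged into one entry with a larger `count`. A sketch of the behavior, again as a hypothetical snippet inside this package, reusing the same hash closure as the schema's `Set` function:

```go
package emr

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
)

// exampleValidateMultiDisks is illustrative only and not part of this PR.
func exampleValidateMultiDisks() {
	hash := func(v interface{}) int {
		m := v.(map[string]interface{})
		return helper.HashString(fmt.Sprintf("%s-%d-%d", m["disk_type"].(string), m["volume"].(int), m["count"].(int)))
	}
	// Both entries describe CLOUD_HSSD disks with volume 200; only count differs,
	// so the set keeps them as two elements and validateMultiDisks reports an error.
	set := schema.NewSet(hash, []interface{}{
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 1},
		map[string]interface{}{"disk_type": "CLOUD_HSSD", "volume": 200, "count": 2},
	})
	err := validateMultiDisks(map[string]interface{}{"multi_disks": set})
	fmt.Println(err != nil) // prints true
}
```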

func fetchMultiDisks(v *emr.NodeHardwareInfo, r *emr.OutterResource) (multiDisks []interface{}) {
// inputDataDiskTag identifies the data disk that is already described by disk_type/disk_size on the resource spec.
var inputDataDiskTag string
if r.DiskType != nil && r.DiskSize != nil {
inputDataDiskTag = fmt.Sprintf("%s-%d", *r.DiskType, *r.DiskSize)
}
for _, item := range v.MCMultiDisk {
outputDataDiskTag := ""
multiDisk := make(map[string]interface{})
if item.Type != nil {
var diskType string
if *item.Type == 4 {
diskType = "CLOUD_SSD"
}
if *item.Type == 5 {
diskType = "CLOUD_PREMIUM"
}
if *item.Type == 6 {
diskType = "CLOUD_HSSD"
}
multiDisk["disk_type"] = diskType
outputDataDiskTag = diskType
}
if item.Volume != nil {
// The API reports the volume in bytes; scale it down to GB to match the schema's volume field.
volume := int(*item.Volume / 1024 / 1024 / 1024)
multiDisk["volume"] = volume
outputDataDiskTag = fmt.Sprintf("%s-%d", outputDataDiskTag, volume)
}
var count int
if item.Count != nil {
count = int(*item.Count)
// One disk of this type and size is already represented by disk_type/disk_size, so leave it out of multi_disks.
if count > 0 && inputDataDiskTag == outputDataDiskTag {
count -= 1
}
multiDisk["count"] = count
}

if count != 0 {
multiDisks = append(multiDisks, multiDisk)
}
}
return
}
80 changes: 52 additions & 28 deletions tencentcloud/services/emr/resource_tc_emr_cluster.go
@@ -40,18 +40,20 @@ func ResourceTencentCloudEmrCluster() *schema.Resource {
Required: true,
ForceNew: true,
Description: "Product ID. Different products ID represents different EMR product versions. Value range:\n" +
"- 16: represents EMR-V2.3.0\n" +
"- 20: indicates EMR-V2.5.0\n" +
"- 25: represents EMR-V3.1.0\n" +
"- 27: represents KAFKA-V1.0.0\n" +
"- 30: indicates EMR-V2.6.0\n" +
"- 33: represents EMR-V3.2.1\n" +
"- 34: stands for EMR-V3.3.0\n" +
"- 36: represents STARROCKS-V1.0.0\n" +
"- 37: indicates EMR-V3.4.0\n" +
"- 38: represents EMR-V2.7.0\n" +
"- 39: stands for STARROCKS-V1.1.0\n" +
"- 41: represents DRUID-V1.1.0.",
" - 16: represents EMR-V2.3.0\n" +
" - 20: represents EMR-V2.5.0\n" +
" - 25: represents EMR-V3.1.0\n" +
" - 27: represents KAFKA-V1.0.0\n" +
" - 30: represents EMR-V2.6.0\n" +
" - 33: represents EMR-V3.2.1\n" +
" - 34: represents EMR-V3.3.0\n" +
" - 37: represents EMR-V3.4.0\n" +
" - 38: represents EMR-V2.7.0\n" +
" - 44: represents EMR-V3.5.0\n" +
" - 50: represents KAFKA-V2.0.0\n" +
" - 51: represents STARROCKS-V1.4.0\n" +
" - 53: represents EMR-V3.6.0\n" +
" - 54: represents STARROCKS-V2.0.0.",
},
"vpc_settings": {
Type: schema.TypeMap,
@@ -534,6 +536,31 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
}

_ = d.Set("instance_id", instanceId)
clusterNodeMap := make(map[string]*emr.NodeHardwareInfo)
err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
result, err := emrService.DescribeClusterNodes(ctx, instanceId, "all", "all", 0, 10)

if err != nil {
return resource.RetryableError(err)
}

if len(result) > 0 {
_ = d.Set("auto_renew", result[0].IsAutoRenew)
for _, item := range result {
node := item
// node flag: 0 = common node; 1 = master node; 2 = core node; 3 = task node
if node.Flag != nil {
clusterNodeMap[strconv.FormatInt(*node.Flag, 10)] = node
}
}
}

return nil
})

if err != nil {
return err
}
if instance != nil {
_ = d.Set("product_id", instance.ProductId)
_ = d.Set("vpc_settings", map[string]interface{}{
@@ -587,6 +614,9 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
if masterResource.RootSize != nil {
masterResourceSpec["root_size"] = *masterResource.RootSize
}
if v, ok := clusterNodeMap["1"]; ok {
masterResourceSpec["multi_disks"] = fetchMultiDisks(v, masterResource)
}
resourceSpec["master_resource_spec"] = []interface{}{masterResourceSpec}
}

@@ -619,6 +649,10 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
if coreResource.RootSize != nil {
coreResourceSpec["root_size"] = *coreResource.RootSize
}
if v, ok := clusterNodeMap["2"]; ok {
coreResourceSpec["multi_disks"] = fetchMultiDisks(v, coreResource)
}

resourceSpec["core_resource_spec"] = []interface{}{coreResourceSpec}
}

@@ -651,6 +685,9 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
if taskResource.RootSize != nil {
taskResourceSpec["root_size"] = *taskResource.RootSize
}
if v, ok := clusterNodeMap["3"]; ok {
taskResourceSpec["multi_disks"] = fetchMultiDisks(v, taskResource)
}
resourceSpec["task_resource_spec"] = []interface{}{taskResourceSpec}
}

@@ -683,6 +720,9 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
if comResource.RootSize != nil {
comResourceSpec["root_size"] = *comResource.RootSize
}
if v, ok := clusterNodeMap["0"]; ok {
comResourceSpec["multi_disks"] = fetchMultiDisks(v, comResource)
}
resourceSpec["common_resource_spec"] = []interface{}{comResourceSpec}
}

@@ -713,22 +753,6 @@ func resourceTencentCloudEmrClusterRead(d *schema.ResourceData, meta interface{}
return err
}
_ = d.Set("tags", tags)
err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
result, err := emrService.DescribeClusterNodes(ctx, instanceId, "all", "all", 0, 10)

if err != nil {
return resource.RetryableError(err)
}

if len(result) > 0 {
_ = d.Set("auto_renew", result[0].IsAutoRenew)
}

return nil
})

if err != nil {
return err
}
return nil
}