From 4c3ff1615b769de7ee561ad2323e073951a629a9 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Mon, 20 Jan 2025 15:54:43 +0800 Subject: [PATCH 1/6] add --- .../services/cvm/resource_tc_instance.go | 394 +++++++++--------- 1 file changed, 201 insertions(+), 193 deletions(-) diff --git a/tencentcloud/services/cvm/resource_tc_instance.go b/tencentcloud/services/cvm/resource_tc_instance.go index 7accdb46c2..0e3a76a4a7 100644 --- a/tencentcloud/services/cvm/resource_tc_instance.go +++ b/tencentcloud/services/cvm/resource_tc_instance.go @@ -986,7 +986,7 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) _ = d.Set("tags", tags) // set system_disk_name - if instance.SystemDisk.DiskId != nil { + if instance.SystemDisk.DiskId != nil && strings.HasPrefix(*instance.SystemDisk.DiskId, "disk-") { err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { disks, err := cbsService.DescribeDiskList(ctx, []*string{instance.SystemDisk.DiskId}) if err != nil { @@ -1028,7 +1028,7 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) value := item.(map[string]interface{}) if v, ok := value["data_disk_id"]; ok && v != nil { diskId := v.(string) - if diskId != "" { + if diskId != "" && strings.HasPrefix(diskId, "disk-") { dataDiskIds = append(dataDiskIds, &diskId) hasDataDisksId = true } @@ -1092,114 +1092,120 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) } } - err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - disks, err := cbsService.DescribeDiskList(ctx, diskIds) - if err != nil { - return tccommon.RetryError(err) - } - - for i := range disks { - disk := disks[i] - if *disk.DiskState == "EXPANDING" { - return resource.RetryableError(fmt.Errorf("data_disk[%d] is expending", i)) + if len(diskIds) > 0 { + err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + disks, err := cbsService.DescribeDiskList(ctx, diskIds) + if err != nil { + return tccommon.RetryError(err) } - diskSizeMap[*disk.DiskId] = disk.DiskSize - if hasDataDisks { - items := strings.Split(*disk.DiskName, "_") - diskOrder := items[len(items)-1] - diskOrderInt, err := strconv.Atoi(diskOrder) - if err != nil { - isCombineDataDisks = true - continue + for i := range disks { + disk := disks[i] + if *disk.DiskState == "EXPANDING" { + return resource.RetryableError(fmt.Errorf("data_disk[%d] is expending", i)) } - diskOrderMap[*disk.DiskId] = diskOrderInt + diskSizeMap[*disk.DiskId] = disk.DiskSize + if hasDataDisks { + items := strings.Split(*disk.DiskName, "_") + diskOrder := items[len(items)-1] + diskOrderInt, err := strconv.Atoi(diskOrder) + if err != nil { + isCombineDataDisks = true + continue + } + + diskOrderMap[*disk.DiskId] = diskOrderInt + } } + + return nil + }) + + if err != nil { + return err } - return nil - }) + tmpDataDisks := make([]interface{}, 0, len(instance.DataDisks)) + if v, ok := d.GetOk("data_disks"); ok { + tmpDataDisks = v.([]interface{}) + } - if err != nil { - return err - } + for _, disk := range instance.DataDisks { + dataDisk := make(map[string]interface{}, 5) + if !strings.HasPrefix(*disk.DiskId, "disk-") { + continue + } - tmpDataDisks := make([]interface{}, 0, len(instance.DataDisks)) - if v, ok := d.GetOk("data_disks"); ok { - tmpDataDisks = v.([]interface{}) - } + dataDisk["data_disk_id"] = disk.DiskId + if disk.DiskId == nil { + dataDisk["data_disk_size"] = disk.DiskSize + } else if size, ok := diskSizeMap[*disk.DiskId]; ok { + 
dataDisk["data_disk_size"] = size + } - for _, disk := range instance.DataDisks { - dataDisk := make(map[string]interface{}, 5) - dataDisk["data_disk_id"] = disk.DiskId - if disk.DiskId == nil { - dataDisk["data_disk_size"] = disk.DiskSize - } else if size, ok := diskSizeMap[*disk.DiskId]; ok { - dataDisk["data_disk_size"] = size + dataDisk["data_disk_type"] = disk.DiskType + dataDisk["data_disk_snapshot_id"] = disk.SnapshotId + dataDisk["delete_with_instance"] = disk.DeleteWithInstance + dataDisk["encrypt"] = disk.Encrypt + dataDisk["throughput_performance"] = disk.ThroughputPerformance + dataDiskList = append(dataDiskList, dataDisk) } - dataDisk["data_disk_type"] = disk.DiskType - dataDisk["data_disk_snapshot_id"] = disk.SnapshotId - dataDisk["delete_with_instance"] = disk.DeleteWithInstance - dataDisk["encrypt"] = disk.Encrypt - dataDisk["throughput_performance"] = disk.ThroughputPerformance - dataDiskList = append(dataDiskList, dataDisk) - } - - if hasDataDisks && !isCombineDataDisks { - sort.SliceStable(dataDiskList, func(idx1, idx2 int) bool { - dataDiskIdIdx1 := *dataDiskList[idx1]["data_disk_id"].(*string) - dataDiskIdIdx2 := *dataDiskList[idx2]["data_disk_id"].(*string) - return diskOrderMap[dataDiskIdIdx1] < diskOrderMap[dataDiskIdIdx2] - }) - } + if hasDataDisks && !isCombineDataDisks { + sort.SliceStable(dataDiskList, func(idx1, idx2 int) bool { + dataDiskIdIdx1 := *dataDiskList[idx1]["data_disk_id"].(*string) + dataDiskIdIdx2 := *dataDiskList[idx2]["data_disk_id"].(*string) + return diskOrderMap[dataDiskIdIdx1] < diskOrderMap[dataDiskIdIdx2] + }) + } - // set data disk delete_with_instance_prepaid - for i := range dataDiskList { - dataDiskList[i]["delete_with_instance_prepaid"] = false - if hasDataDisks { - tmpDataDisk := tmpDataDisks[i].(map[string]interface{}) - if deleteWithInstancePrepaidBool, ok := tmpDataDisk["delete_with_instance_prepaid"].(bool); ok { - dataDiskList[i]["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool + // set data disk delete_with_instance_prepaid + for i := range dataDiskList { + dataDiskList[i]["delete_with_instance_prepaid"] = false + if hasDataDisks { + tmpDataDisk := tmpDataDisks[i].(map[string]interface{}) + if deleteWithInstancePrepaidBool, ok := tmpDataDisk["delete_with_instance_prepaid"].(bool); ok { + dataDiskList[i]["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool + } } } - } - // set data disk name - finalDiskIds := make([]*string, 0, len(dataDiskList)) - for _, item := range dataDiskList { - diskId := item["data_disk_id"].(*string) - finalDiskIds = append(finalDiskIds, diskId) - } + // set data disk name + finalDiskIds := make([]*string, 0, len(dataDiskList)) + for _, item := range dataDiskList { + diskId := item["data_disk_id"].(*string) + finalDiskIds = append(finalDiskIds, diskId) + } - if len(finalDiskIds) != 0 { - err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - disks, err := cbsService.DescribeDiskList(ctx, finalDiskIds) - if err != nil { - return tccommon.RetryError(err) - } + if len(finalDiskIds) != 0 { + err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + disks, err := cbsService.DescribeDiskList(ctx, finalDiskIds) + if err != nil { + return tccommon.RetryError(err) + } - for _, disk := range disks { - diskId := disk.DiskId - for _, v := range dataDiskList { - tmpDiskId := v["data_disk_id"].(*string) - if *diskId == *tmpDiskId { - v["data_disk_name"] = disk.DiskName - break + for _, disk := range disks { + diskId := disk.DiskId + for _, v := 
range dataDiskList { + tmpDiskId := v["data_disk_id"].(*string) + if *diskId == *tmpDiskId { + v["data_disk_name"] = disk.DiskName + break + } } } - } - return nil - }) + return nil + }) - if err != nil { - return err + if err != nil { + return err + } } - } - _ = d.Set("data_disks", dataDiskList) + _ = d.Set("data_disks", dataDiskList) + } } else if len(instance.DataDisks) > 0 && hasDataDisksName { // scene with no disks name dDiskHash := make([]map[string]interface{}, 0) @@ -1251,69 +1257,107 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) } } - err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { - cbsDisks, err = cbsService.DescribeDiskList(ctx, diskIds) + if len(diskIds) > 0 { + err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + cbsDisks, err = cbsService.DescribeDiskList(ctx, diskIds) + if err != nil { + return tccommon.RetryError(err) + } + + for i := range cbsDisks { + disk := cbsDisks[i] + if *disk.DiskState == "EXPANDING" { + return resource.RetryableError(fmt.Errorf("data_disk[%d] is expending", i)) + } + } + + return nil + }) + if err != nil { - return tccommon.RetryError(err) + return err } - for i := range cbsDisks { - disk := cbsDisks[i] - if *disk.DiskState == "EXPANDING" { - return resource.RetryableError(fmt.Errorf("data_disk[%d] is expending", i)) + // make data disks data + sourceDataDisks := make([]*map[string]interface{}, 0) + for _, cvmDisk := range instance.DataDisks { + for _, cbsDisk := range cbsDisks { + if *cvmDisk.DiskId == *cbsDisk.DiskId { + dataDisk := make(map[string]interface{}, 10) + dataDisk["data_disk_id"] = cvmDisk.DiskId + dataDisk["data_disk_size"] = cvmDisk.DiskSize + dataDisk["data_disk_name"] = cbsDisk.DiskName + dataDisk["data_disk_type"] = cvmDisk.DiskType + dataDisk["data_disk_snapshot_id"] = cvmDisk.SnapshotId + dataDisk["delete_with_instance"] = cvmDisk.DeleteWithInstance + dataDisk["encrypt"] = cvmDisk.Encrypt + dataDisk["throughput_performance"] = cvmDisk.ThroughputPerformance + dataDisk["flag"] = 0 + sourceDataDisks = append(sourceDataDisks, &dataDisk) + break + } } } - return nil - }) - - if err != nil { - return err - } + // has set disk name first + for v := range sourceDataDisks { + for i := range dDiskHash { + disk := *sourceDataDisks[v] + diskFlag := disk["flag"].(int) + diskName := disk["data_disk_name"].(*string) + diskType := disk["data_disk_type"].(*string) + diskSize := disk["data_disk_size"].(*int64) + deleteWithInstance := disk["delete_with_instance"].(*bool) + encrypt := disk["encrypt"].(*bool) + tmpHash := getDataDiskHash(diskHash{ + diskType: *diskType, + diskSize: *diskSize, + deleteWithInstance: *deleteWithInstance, + encrypt: *encrypt, + }) - // make data disks data - sourceDataDisks := make([]*map[string]interface{}, 0) - for _, cvmDisk := range instance.DataDisks { - for _, cbsDisk := range cbsDisks { - if *cvmDisk.DiskId == *cbsDisk.DiskId { - dataDisk := make(map[string]interface{}, 10) - dataDisk["data_disk_id"] = cvmDisk.DiskId - dataDisk["data_disk_size"] = cvmDisk.DiskSize - dataDisk["data_disk_name"] = cbsDisk.DiskName - dataDisk["data_disk_type"] = cvmDisk.DiskType - dataDisk["data_disk_snapshot_id"] = cvmDisk.SnapshotId - dataDisk["delete_with_instance"] = cvmDisk.DeleteWithInstance - dataDisk["encrypt"] = cvmDisk.Encrypt - dataDisk["throughput_performance"] = cvmDisk.ThroughputPerformance - dataDisk["flag"] = 0 - sourceDataDisks = append(sourceDataDisks, &dataDisk) - break + // get disk name + hashItem := dDiskHash[i] + 
if _, ok := hashItem[*diskName]; ok { + // check hash and flag + if hashItem["flag"] == 0 && diskFlag == 0 && tmpHash == hashItem[*diskName] { + dataDisk := make(map[string]interface{}, 8) + dataDisk["data_disk_id"] = disk["data_disk_id"] + dataDisk["data_disk_size"] = disk["data_disk_size"] + dataDisk["data_disk_name"] = disk["data_disk_name"] + dataDisk["data_disk_type"] = disk["data_disk_type"] + dataDisk["data_disk_snapshot_id"] = disk["data_disk_snapshot_id"] + dataDisk["delete_with_instance"] = disk["delete_with_instance"] + dataDisk["encrypt"] = disk["encrypt"] + dataDisk["throughput_performance"] = disk["throughput_performance"] + tmpDataDiskMap[hashItem["index"].(int)] = dataDisk + hashItem["flag"] = 1 + disk["flag"] = 1 + break + } + } } } - } - // has set disk name first - for v := range sourceDataDisks { - for i := range dDiskHash { - disk := *sourceDataDisks[v] - diskFlag := disk["flag"].(int) - diskName := disk["data_disk_name"].(*string) - diskType := disk["data_disk_type"].(*string) - diskSize := disk["data_disk_size"].(*int64) - deleteWithInstance := disk["delete_with_instance"].(*bool) - encrypt := disk["encrypt"].(*bool) - tmpHash := getDataDiskHash(diskHash{ - diskType: *diskType, - diskSize: *diskSize, - deleteWithInstance: *deleteWithInstance, - encrypt: *encrypt, - }) + // no set disk name last + for v := range sourceDataDisks { + for i := range dDiskHash { + disk := *sourceDataDisks[v] + diskFlag := disk["flag"].(int) + diskType := disk["data_disk_type"].(*string) + diskSize := disk["data_disk_size"].(*int64) + deleteWithInstance := disk["delete_with_instance"].(*bool) + encrypt := disk["encrypt"].(*bool) + tmpHash := getDataDiskHash(diskHash{ + diskType: *diskType, + diskSize: *diskSize, + deleteWithInstance: *deleteWithInstance, + encrypt: *encrypt, + }) - // get disk name - hashItem := dDiskHash[i] - if _, ok := hashItem[*diskName]; ok { // check hash and flag - if hashItem["flag"] == 0 && diskFlag == 0 && tmpHash == hashItem[*diskName] { + hashItem := dDiskHash[i] + if hashItem["flag"] == 0 && diskFlag == 0 && tmpHash == hashItem[strconv.Itoa(i)] { dataDisk := make(map[string]interface{}, 8) dataDisk["data_disk_id"] = disk["data_disk_id"] dataDisk["data_disk_size"] = disk["data_disk_size"] @@ -1330,70 +1374,34 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) } } } - } - // no set disk name last - for v := range sourceDataDisks { - for i := range dDiskHash { - disk := *sourceDataDisks[v] - diskFlag := disk["flag"].(int) - diskType := disk["data_disk_type"].(*string) - diskSize := disk["data_disk_size"].(*int64) - deleteWithInstance := disk["delete_with_instance"].(*bool) - encrypt := disk["encrypt"].(*bool) - tmpHash := getDataDiskHash(diskHash{ - diskType: *diskType, - diskSize: *diskSize, - deleteWithInstance: *deleteWithInstance, - encrypt: *encrypt, - }) - - // check hash and flag - hashItem := dDiskHash[i] - if hashItem["flag"] == 0 && diskFlag == 0 && tmpHash == hashItem[strconv.Itoa(i)] { - dataDisk := make(map[string]interface{}, 8) - dataDisk["data_disk_id"] = disk["data_disk_id"] - dataDisk["data_disk_size"] = disk["data_disk_size"] - dataDisk["data_disk_name"] = disk["data_disk_name"] - dataDisk["data_disk_type"] = disk["data_disk_type"] - dataDisk["data_disk_snapshot_id"] = disk["data_disk_snapshot_id"] - dataDisk["delete_with_instance"] = disk["delete_with_instance"] - dataDisk["encrypt"] = disk["encrypt"] - dataDisk["throughput_performance"] = disk["throughput_performance"] - tmpDataDiskMap[hashItem["index"].(int)] 
= dataDisk - hashItem["flag"] = 1 - disk["flag"] = 1 - break - } + keys := make([]int, 0, len(tmpDataDiskMap)) + for k := range tmpDataDiskMap { + keys = append(keys, k) } - } - keys := make([]int, 0, len(tmpDataDiskMap)) - for k := range tmpDataDiskMap { - keys = append(keys, k) - } - - sort.Ints(keys) - for _, v := range keys { - tmpDataDisk := tmpDataDiskMap[v].(map[string]interface{}) - dataDiskList = append(dataDiskList, tmpDataDisk) - } + sort.Ints(keys) + for _, v := range keys { + tmpDataDisk := tmpDataDiskMap[v].(map[string]interface{}) + dataDiskList = append(dataDiskList, tmpDataDisk) + } - // set data disk delete_with_instance_prepaid - if v, ok := d.GetOk("data_disks"); ok { - tmpDataDisks := v.([]interface{}) - for i := range tmpDataDisks { - dataDiskList[i]["delete_with_instance_prepaid"] = false - if hasDataDisks { - tmpDataDisk := tmpDataDisks[i].(map[string]interface{}) - if deleteWithInstancePrepaidBool, ok := tmpDataDisk["delete_with_instance_prepaid"].(bool); ok { - dataDiskList[i]["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool + // set data disk delete_with_instance_prepaid + if v, ok := d.GetOk("data_disks"); ok { + tmpDataDisks := v.([]interface{}) + for i := range tmpDataDisks { + dataDiskList[i]["delete_with_instance_prepaid"] = false + if hasDataDisks { + tmpDataDisk := tmpDataDisks[i].(map[string]interface{}) + if deleteWithInstancePrepaidBool, ok := tmpDataDisk["delete_with_instance_prepaid"].(bool); ok { + dataDiskList[i]["delete_with_instance_prepaid"] = deleteWithInstancePrepaidBool + } } } } - } - _ = d.Set("data_disks", dataDiskList) + _ = d.Set("data_disks", dataDiskList) + } } else { _ = d.Set("data_disks", dataDiskList) } From 7f199d14cc3abc8f7fa64efb987e3879c1301275 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Mon, 20 Jan 2025 15:56:42 +0800 Subject: [PATCH 2/6] add --- .changelog/3089.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/3089.txt diff --git a/.changelog/3089.txt b/.changelog/3089.txt new file mode 100644 index 0000000000..8464275ff8 --- /dev/null +++ b/.changelog/3089.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/tencentcloud_instance: support local disks +``` From b082adf2d34f751e8d5f46cdca4eced85aaefc4e Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Tue, 21 Jan 2025 16:37:09 +0800 Subject: [PATCH 3/6] add --- .../services/cvm/resource_tc_instance_test.go | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tencentcloud/services/cvm/resource_tc_instance_test.go b/tencentcloud/services/cvm/resource_tc_instance_test.go index 7d173298e0..061fcd0b1f 100644 --- a/tencentcloud/services/cvm/resource_tc_instance_test.go +++ b/tencentcloud/services/cvm/resource_tc_instance_test.go @@ -1680,6 +1680,75 @@ resource "tencentcloud_cbs_storage_attachment" "attachment_cbs_disk2" { ` +func TestAccTencentCloudInstanceResourceWithLocalDisk(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.AccPreCheck(t) + }, + Providers: acctest.AccProviders, + CheckDestroy: testAccCheckCvmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCvmInstanceResource_LocalDiskCreate, + Check: resource.ComposeTestCheckFunc( + testAccCheckCvmInstanceExists("tencentcloud_instance.local_disk"), + resource.TestCheckResourceAttr("tencentcloud_instance.local_disk", "instance_status", "RUNNING")), + }, + }, + }) +} + +const testAccCvmInstanceResource_LocalDiskCreate = ` +data "tencentcloud_images" "default" { + 
image_type = ["PUBLIC_IMAGE"] + image_name_regex = "Final" +} + +resource "tencentcloud_vpc" "vpc" { + name = "vpc" + cidr_block = "10.0.0.0/16" +} + +resource "tencentcloud_subnet" "subnet" { + vpc_id = tencentcloud_vpc.vpc.id + name = "subnet" + cidr_block = "10.0.0.0/16" + availability_zone = "ap-guangzhou-6" +} + +resource "tencentcloud_instance" "example" { + instance_name = "tf-example" + availability_zone = "ap-guangzhou-6" + image_id = data.tencentcloud_images.default.images.0.image_id + instance_type = "IT5.4XLARGE64" + system_disk_type = "LOCAL_BASIC" + system_disk_size = 50 + hostname = "user" + project_id = 0 + vpc_id = tencentcloud_vpc.vpc.id + subnet_id = tencentcloud_subnet.subnet.id + + data_disks { + data_disk_type = "CLOUD_HSSD" + data_disk_size = 50 + encrypt = false + data_disk_name = "tf-test1" + } + + data_disks { + data_disk_type = "CLOUD_HSSD" + data_disk_size = 60 + encrypt = false + data_disk_name = "tf-test2" + } + + tags = { + tagKey = "tagValue" + } +} +` + func TestAccTencentCloudNeedFixInstancePostpaidToPrepaid(t *testing.T) { id := "tencentcloud_instance.foo" From 9a452790afcd242e27a84e3e611956f2ca7033c4 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Tue, 21 Jan 2025 17:35:23 +0800 Subject: [PATCH 4/6] add --- tencentcloud/services/cvm/resource_tc_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tencentcloud/services/cvm/resource_tc_instance_test.go b/tencentcloud/services/cvm/resource_tc_instance_test.go index 061fcd0b1f..3c2e3e08aa 100644 --- a/tencentcloud/services/cvm/resource_tc_instance_test.go +++ b/tencentcloud/services/cvm/resource_tc_instance_test.go @@ -1717,7 +1717,7 @@ resource "tencentcloud_subnet" "subnet" { availability_zone = "ap-guangzhou-6" } -resource "tencentcloud_instance" "example" { +resource "tencentcloud_instance" "local_disk" { instance_name = "tf-example" availability_zone = "ap-guangzhou-6" image_id = data.tencentcloud_images.default.images.0.image_id From 7c01721170931b44d98d6dfa97c7328f7a567ade Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Tue, 21 Jan 2025 17:58:49 +0800 Subject: [PATCH 5/6] add --- tencentcloud/services/cvm/resource_tc_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tencentcloud/services/cvm/resource_tc_instance.go b/tencentcloud/services/cvm/resource_tc_instance.go index 0e3a76a4a7..4492c4fa08 100644 --- a/tencentcloud/services/cvm/resource_tc_instance.go +++ b/tencentcloud/services/cvm/resource_tc_instance.go @@ -1389,7 +1389,7 @@ func resourceTencentCloudInstanceRead(d *schema.ResourceData, meta interface{}) // set data disk delete_with_instance_prepaid if v, ok := d.GetOk("data_disks"); ok { tmpDataDisks := v.([]interface{}) - for i := range tmpDataDisks { + for i := range dataDiskList { dataDiskList[i]["delete_with_instance_prepaid"] = false if hasDataDisks { tmpDataDisk := tmpDataDisks[i].(map[string]interface{}) From ed5e02802134e716ae13d467abeb68bf19e02ac7 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Tue, 21 Jan 2025 20:01:59 +0800 Subject: [PATCH 6/6] add --- .../services/cbs/service_tencentcloud_cbs.go | 30 ++++++++++++++++--- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/tencentcloud/services/cbs/service_tencentcloud_cbs.go b/tencentcloud/services/cbs/service_tencentcloud_cbs.go index 520392a7ac..b88a4f11f7 100644 --- a/tencentcloud/services/cbs/service_tencentcloud_cbs.go +++ b/tencentcloud/services/cbs/service_tencentcloud_cbs.go @@ -311,12 +311,34 @@ func 
(me *CbsService) ResizeDisk(ctx context.Context, diskId string, diskSize in ratelimit.Check(request.GetAction()) response, err := me.client.UseCbsClient().ResizeDisk(request) if err != nil { - log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", - logId, request.GetAction(), request.ToJsonString(), err.Error()) + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), err.Error()) return err } - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", - logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + err = resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + storage, e := me.DescribeDiskById(ctx, diskId) + if e != nil { + return tccommon.RetryError(e) + } + + if *storage.DiskState == CBS_STORAGE_STATUS_EXPANDING { + return resource.RetryableError(fmt.Errorf("cbs storage status is %s", *storage.DiskState)) + } + + if *storage.DiskSize != uint64(diskSize) { + return resource.RetryableError(fmt.Errorf("waiting for cbs size changed to %d, now %d", diskSize, *storage.DiskSize)) + } + + return nil + }) + + if err != nil { + log.Printf("[CRITAL]%s resize cbs failed, reason:%s\n ", logId, err.Error()) + return err + } + return nil }
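
Note on the final patch: the reworked ResizeDisk no longer returns as soon as the API call succeeds; it now polls the disk until it has left the EXPANDING state and reports the requested size before returning. The following is a minimal standalone sketch of that wait loop, not the provider's actual code: the diskStatus type and describeDisk helper are hypothetical stand-ins for the real CBS describe call, and only the resource.Retry helpers come from the Terraform plugin SDK.

// Sketch of the wait-for-resize pattern, assuming a hypothetical describeDisk helper.
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

type diskStatus struct {
	State string
	Size  uint64
}

// describeDisk is a hypothetical stand-in for the CBS DescribeDisks lookup.
func describeDisk(diskId string) (*diskStatus, error) {
	return &diskStatus{State: "ATTACHED", Size: 100}, nil
}

// waitForResize polls until the disk is no longer expanding and matches the requested size.
func waitForResize(diskId string, wantSize uint64, timeout time.Duration) error {
	return resource.Retry(timeout, func() *resource.RetryError {
		disk, err := describeDisk(diskId)
		if err != nil {
			return resource.NonRetryableError(err)
		}

		if disk.State == "EXPANDING" {
			return resource.RetryableError(fmt.Errorf("disk %s is still expanding", diskId))
		}

		if disk.Size != wantSize {
			return resource.RetryableError(fmt.Errorf("disk %s size is %d, waiting for %d", diskId, disk.Size, wantSize))
		}

		return nil
	})
}

func main() {
	if err := waitForResize("disk-xxxxxxxx", 100, 5*time.Minute); err != nil {
		fmt.Println("resize did not complete:", err)
		return
	}

	fmt.Println("resize completed")
}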