diff --git a/castai/resource_workload_scaling_policy.go b/castai/resource_workload_scaling_policy.go
index 0be82dde..ff7a8bb9 100644
--- a/castai/resource_workload_scaling_policy.go
+++ b/castai/resource_workload_scaling_policy.go
@@ -323,14 +323,28 @@ func resourceWorkloadScalingPolicyCreate(ctx context.Context, d *schema.Resource
 	req.RecommendationPolicies.AntiAffinity = toAntiAffinity(toSection(d, "anti_affinity"))
 
-	resp, err := client.WorkloadOptimizationAPICreateWorkloadScalingPolicyWithResponse(ctx, clusterID, req)
-	if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
-		return diag.FromErr(checkErr)
+	create, err := client.WorkloadOptimizationAPICreateWorkloadScalingPolicyWithResponse(ctx, clusterID, req)
+	if err != nil {
+		return diag.FromErr(err)
 	}
 
-	d.SetId(resp.JSON200.Id)
-
-	return resourceWorkloadScalingPolicyRead(ctx, d, meta)
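+	// A 409 Conflict means a policy with this name already exists: adopt and
+	// update the default policy in place, but refuse to take ownership of any
+	// other existing policy.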
+	switch create.StatusCode() {
+	case http.StatusOK:
+		d.SetId(create.JSON200.Id)
+		return resourceWorkloadScalingPolicyRead(ctx, d, meta)
+	case http.StatusConflict:
+		policy, err := getWorkloadScalingPolicyByName(ctx, client, clusterID, req.Name)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+		if policy.IsDefault {
+			d.SetId(policy.Id)
+			return resourceWorkloadScalingPolicyUpdate(ctx, d, meta)
+		}
+		return diag.Errorf("scaling policy with name %q already exists", req.Name)
+	default:
+		return diag.Errorf("expected status code %d, received: status=%d body=%s", http.StatusOK, create.StatusCode(), string(create.GetBody()))
+	}
 }
 
 func resourceWorkloadScalingPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
@@ -542,37 +556,30 @@ func validateResourceLimit(r sdk.WorkloadoptimizationV1ResourcePolicies) error {
 }
 
 func workloadScalingPolicyImporter(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
-	ids := strings.Split(d.Id(), "/")
-	if len(ids) != 2 || ids[0] == "" || ids[1] == "" {
+	clusterID, nameOrID, found := strings.Cut(d.Id(), "/")
+	if !found || clusterID == "" || nameOrID == "" {
 		return nil, fmt.Errorf("expected import id with format: <cluster_id>/<scaling_policy_name_or_id>, got: %q", d.Id())
 	}
 
-	clusterID, nameOrID := ids[0], ids[1]
 	if err := d.Set(FieldClusterID, clusterID); err != nil {
-		return nil, fmt.Errorf("setting cluster nameOrID: %w", err)
+		return nil, fmt.Errorf("setting cluster ID: %w", err)
 	}
-	d.SetId(nameOrID)
 
 	// Return if scaling policy ID provided.
 	if _, err := uuid.Parse(nameOrID); err == nil {
+		d.SetId(nameOrID)
 		return []*schema.ResourceData{d}, nil
 	}
 
 	// Find scaling policy ID by name.
 	client := meta.(*ProviderConfig).api
-	resp, err := client.WorkloadOptimizationAPIListWorkloadScalingPoliciesWithResponse(ctx, clusterID)
-	if err := sdk.CheckOKResponse(resp, err); err != nil {
+	policy, err := getWorkloadScalingPolicyByName(ctx, client, clusterID, nameOrID)
+	if err != nil {
 		return nil, err
 	}
 
-	for _, sp := range resp.JSON200.Items {
-		if sp.Name == nameOrID {
-			d.SetId(sp.Id)
-			return []*schema.ResourceData{d}, nil
-		}
-	}
-
-	return nil, fmt.Errorf("failed to find workload scaling policy with the following name: %v", nameOrID)
+	d.SetId(policy.Id)
+	return []*schema.ResourceData{d}, nil
 }
 
 func toWorkloadScalingPolicies(obj map[string]interface{}) sdk.WorkloadoptimizationV1ResourcePolicies {
@@ -871,3 +878,17 @@ func toAntiAffinityMap(s *sdk.WorkloadoptimizationV1AntiAffinitySettings) []map[
 	return []map[string]any{m}
 }
+
+func getWorkloadScalingPolicyByName(ctx context.Context, client sdk.ClientWithResponsesInterface, clusterID, name string) (*sdk.WorkloadoptimizationV1WorkloadScalingPolicy, error) {
+	list, err := client.WorkloadOptimizationAPIListWorkloadScalingPoliciesWithResponse(ctx, clusterID)
+	if checkErr := sdk.CheckOKResponse(list, err); checkErr != nil {
+		return nil, checkErr
+	}
+
+	for _, sp := range list.JSON200.Items {
+		if sp.Name == name {
+			return &sp, nil
+		}
+	}
+	return nil, fmt.Errorf("policy with name %q not found", name)
+}
diff --git a/docs/resources/workload_scaling_policy.md b/docs/resources/workload_scaling_policy.md
index 6753e903..26189b7c 100644
--- a/docs/resources/workload_scaling_policy.md
+++ b/docs/resources/workload_scaling_policy.md
@@ -225,12 +225,117 @@ Optional:
 - `read` (String)
 - `update` (String)
 
-
 ## Importing
+
+For each connected cluster, a default scaling policy is created. An existing scaling policy can be imported into the
+Terraform state using the `terraform import` command or the
+[`import`](https://developer.hashicorp.com/terraform/language/import#syntax) block. The `import` block is simpler and
+more convenient, and is recommended if you are on Terraform 1.5.0 or later.
+
+### Import using the `import` block
+
+#### Import a single scaling policy
+
+1. Create an `import.tf` file with the following content:
+   ```tf
+   import {
+     to = castai_workload_scaling_policy.default
+     id = "<cluster_id>/<scaling_policy_name>" # e.g. "ff4c2211-3511-4d95-b6de-2919fc3287a3/default"
+   }
+   ```
+
+2. Run the `terraform plan` command to generate the configuration and an import plan:
+
+   ```shell
+   terraform plan -out=import.plan -var-file=tf.vars -generate-config-out=generated.tf
+   ```
+
+3. Review the `generated.tf` file and ensure the imported scaling policy is correct. Terraform may set zero values
+   for some of the generated configuration parameters, for example:
+
+   ```hcl
+   cpu {
+     look_back_period_seconds = 0
+   }
+   ```
+
+4. Apply the import plan:
+
+   ```shell
+   terraform apply "import.plan"
+   ```
+
+#### Import multiple scaling policies
+
+To import multiple scaling policies, you need to know the cluster IDs and the policy names. `for_each` cannot be
+used when generating configuration, so you need to define the policy properties yourself, or you can
+[import a single policy](#import-a-single-scaling-policy) first and use it as a template for the other policies.
+
+> [!NOTE]
+> The example below assumes that you want to import the "default" scaling policy for multiple clusters. If you want
+> to import scaling policies with different names, adjust the `id` argument in the `import` block accordingly.
+
+1. Create the `import.tf` file with the following content:
+
+   ```tf
+   locals {
+     policies = {
+       "<cluster_1_alias>" = "<cluster_1_id>"
+       "<cluster_2_alias>" = "<cluster_2_id>"
+       "<cluster_3_alias>" = "<cluster_3_id>"
+     }
+   }
+
+   import {
+     for_each = local.policies
+     to       = castai_workload_scaling_policy.default[each.key]
+     id       = "${each.value}/default"
+   }
+
+   resource "castai_workload_scaling_policy" "default" {
+     for_each          = local.policies
+     cluster_id        = each.value
+     apply_type        = "IMMEDIATE"
+     management_option = "READ_ONLY"
+     name              = "default"
+     cpu {
+       apply_threshold          = 0.1
+       args                     = ["0.80"]
+       function                 = "QUANTILE"
+       look_back_period_seconds = 86400
+       min                      = 0.01
+     }
+     memory {
+       apply_threshold          = 0.1
+       args                     = []
+       function                 = "MAX"
+       look_back_period_seconds = 86400
+       min                      = 10
+       overhead                 = 0.1
+     }
+   }
+   ```
+
+2. Run the `terraform plan` command and review the import plan:
+
+   ```shell
+   terraform plan -out=import.plan -var-file=tf.vars
+   ```
+
+3. Apply the import plan:
+
+   ```shell
+   terraform apply "import.plan"
+   ```
+
+### Import using the `terraform import` command
+
 You can use the `terraform import` command to import existing scaling policy
 to Terraform state. To import a resource, first write a resource block for it
 in your configuration, establishing the name by which it will be known to
 Terraform:
+
 ```hcl
 resource "castai_workload_scaling_policy" "services" {
   # ...
 }
 ```
@@ -245,4 +350,10 @@ $ terraform import castai_workload_scaling_policy.services <cluster_id>/services
 If you are using CAST AI Terraform modules, import command will be slightly different:
 ```shell
 $ terraform import 'module.castai-eks-cluster.castai_workload_scaling_policy.this["services"]' <cluster_id>/services
-```
\ No newline at end of file
+```
+
+## Upsert scaling policy
+
+The recommended approach is to [import](#importing) the scaling policy and then apply your changes to it. However, if
+that is not possible, you can define the default policy resource yourself, and the CAST AI Terraform provider will
+update the existing policy instead of returning a name-conflict error.
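+
+A minimal sketch of such a definition might look like this (the `castai_eks_cluster.this` reference and the attribute
+values are illustrative; align them with your own cluster resource and desired settings):
+
+```hcl
+resource "castai_workload_scaling_policy" "default" {
+  cluster_id        = castai_eks_cluster.this.id
+  name              = "default"
+  apply_type        = "IMMEDIATE"
+  management_option = "READ_ONLY"
+
+  cpu {
+    function = "QUANTILE"
+    args     = ["0.80"]
+  }
+
+  memory {
+    function = "MAX"
+    overhead = 0.1
+  }
+}
+```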
diff --git a/templates/resources/workload_scaling_policy.md.tmpl b/templates/resources/workload_scaling_policy.md.tmpl
index 078997bc..76fcc5c9 100644
--- a/templates/resources/workload_scaling_policy.md.tmpl
+++ b/templates/resources/workload_scaling_policy.md.tmpl
@@ -18,12 +18,117 @@ simultaneously or create custom policies with different settings and apply them
 
 {{ .SchemaMarkdown | trimspace }}
 
-
 ## Importing
+
+For each connected cluster, a default scaling policy is created. An existing scaling policy can be imported into the
+Terraform state using the `terraform import` command or the
+[`import`](https://developer.hashicorp.com/terraform/language/import#syntax) block. The `import` block is simpler and
+more convenient, and is recommended if you are on Terraform 1.5.0 or later.
+
+### Import using the `import` block
+
+#### Import a single scaling policy
+
+1. Create an `import.tf` file with the following content:
+   ```tf
+   import {
+     to = castai_workload_scaling_policy.default
+     id = "<cluster_id>/<scaling_policy_name>" # e.g. "ff4c2211-3511-4d95-b6de-2919fc3287a3/default"
+   }
+   ```
+
+2. Run the `terraform plan` command to generate the configuration and an import plan:
+
+   ```shell
+   terraform plan -out=import.plan -var-file=tf.vars -generate-config-out=generated.tf
+   ```
+
+3. Review the `generated.tf` file and ensure the imported scaling policy is correct. Terraform may set zero values
+   for some of the generated configuration parameters, for example:
+
+   ```hcl
+   cpu {
+     look_back_period_seconds = 0
+   }
+   ```
+
+4. Apply the import plan:
+
+   ```shell
+   terraform apply "import.plan"
+   ```
+
+#### Import multiple scaling policies
+
+To import multiple scaling policies, you need to know the cluster IDs and the policy names. `for_each` cannot be
+used when generating configuration, so you need to define the policy properties yourself, or you can
+[import a single policy](#import-a-single-scaling-policy) first and use it as a template for the other policies.
+
+> [!NOTE]
+> The example below assumes that you want to import the "default" scaling policy for multiple clusters. If you want
+> to import scaling policies with different names, adjust the `id` argument in the `import` block accordingly.
+
+1. Create the `import.tf` file with the following content:
+
+   ```tf
+   locals {
+     policies = {
+       "<cluster_1_alias>" = "<cluster_1_id>"
+       "<cluster_2_alias>" = "<cluster_2_id>"
+       "<cluster_3_alias>" = "<cluster_3_id>"
+     }
+   }
+
+   import {
+     for_each = local.policies
+     to       = castai_workload_scaling_policy.default[each.key]
+     id       = "${each.value}/default"
+   }
+
+   resource "castai_workload_scaling_policy" "default" {
+     for_each          = local.policies
+     cluster_id        = each.value
+     apply_type        = "IMMEDIATE"
+     management_option = "READ_ONLY"
+     name              = "default"
+     cpu {
+       apply_threshold          = 0.1
+       args                     = ["0.80"]
+       function                 = "QUANTILE"
+       look_back_period_seconds = 86400
+       min                      = 0.01
+     }
+     memory {
+       apply_threshold          = 0.1
+       args                     = []
+       function                 = "MAX"
+       look_back_period_seconds = 86400
+       min                      = 10
+       overhead                 = 0.1
+     }
+   }
+   ```
+
+2. Run the `terraform plan` command and review the import plan:
+
+   ```shell
+   terraform plan -out=import.plan -var-file=tf.vars
+   ```
+
+3. Apply the import plan:
+
+   ```shell
+   terraform apply "import.plan"
+   ```
+
+### Import using the `terraform import` command
+
 You can use the `terraform import` command to import existing scaling policy
 to Terraform state. To import a resource, first write a resource block for it
 in your configuration, establishing the name by which it will be known to
 Terraform:
+
 ```hcl
 resource "castai_workload_scaling_policy" "services" {
   # ...
 }
 ```
@@ -38,4 +143,10 @@ $ terraform import castai_workload_scaling_policy.services <cluster_id>/services
 If you are using CAST AI Terraform modules, import command will be slightly different:
 ```shell
 $ terraform import 'module.castai-eks-cluster.castai_workload_scaling_policy.this["services"]' <cluster_id>/services
-```
\ No newline at end of file
+```
+
+## Upsert scaling policy
+
+The recommended approach is to [import](#importing) the scaling policy and then apply your changes to it. However, if
+that is not possible, you can define the default policy resource yourself, and the CAST AI Terraform provider will
+update the existing policy instead of returning a name-conflict error.
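+
+A minimal sketch of such a definition might look like this (the `castai_eks_cluster.this` reference and the attribute
+values are illustrative; align them with your own cluster resource and desired settings):
+
+```hcl
+resource "castai_workload_scaling_policy" "default" {
+  cluster_id        = castai_eks_cluster.this.id
+  name              = "default"
+  apply_type        = "IMMEDIATE"
+  management_option = "READ_ONLY"
+
+  cpu {
+    function = "QUANTILE"
+    args     = ["0.80"]
+  }
+
+  memory {
+    function = "MAX"
+    overhead = 0.1
+  }
+}
+```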