
Handle the creation of default scaling policies #443

Merged (6 commits) on Jan 28, 2025
63 changes: 42 additions & 21 deletions castai/resource_workload_scaling_policy.go
```diff
@@ -323,14 +323,28 @@ func resourceWorkloadScalingPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 
 	req.RecommendationPolicies.AntiAffinity = toAntiAffinity(toSection(d, "anti_affinity"))
 
-	resp, err := client.WorkloadOptimizationAPICreateWorkloadScalingPolicyWithResponse(ctx, clusterID, req)
-	if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
-		return diag.FromErr(checkErr)
+	create, err := client.WorkloadOptimizationAPICreateWorkloadScalingPolicyWithResponse(ctx, clusterID, req)
+	if err != nil {
+		return diag.FromErr(err)
 	}
 
-	d.SetId(resp.JSON200.Id)
-
-	return resourceWorkloadScalingPolicyRead(ctx, d, meta)
+	switch create.StatusCode() {
+	case http.StatusOK:
+		d.SetId(create.JSON200.Id)
+		return resourceWorkloadScalingPolicyRead(ctx, d, meta)
+	case http.StatusConflict:
+		policy, err := getWorkloadScalingPolicyByName(ctx, client, clusterID, req.Name)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+		if policy.IsDefault {
+			d.SetId(policy.Id)
+			return resourceWorkloadScalingPolicyUpdate(ctx, d, meta)
+		}
+		return diag.Errorf("scaling policy with name %q already exists", req.Name)
+	default:
+		return diag.Errorf("expected status code %d, received: status=%d body=%s", http.StatusOK, create.StatusCode(), string(create.GetBody()))
+	}
 }
 
 func resourceWorkloadScalingPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
```
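The `http.StatusConflict` branch is the heart of the change. Here are the same lines again with explanatory comments added (the comments are not part of the diff):

```go
case http.StatusConflict:
	// A policy with this name already exists on the cluster. If it is the
	// server-created default policy, adopt it: store its ID on the resource
	// and update it in place rather than failing the create.
	policy, err := getWorkloadScalingPolicyByName(ctx, client, clusterID, req.Name)
	if err != nil {
		return diag.FromErr(err)
	}
	if policy.IsDefault {
		d.SetId(policy.Id)
		return resourceWorkloadScalingPolicyUpdate(ctx, d, meta)
	}
	// Any other name collision remains an error.
	return diag.Errorf("scaling policy with name %q already exists", req.Name)
```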
```diff
@@ -542,37 +556,30 @@ func validateResourceLimit(r sdk.WorkloadoptimizationV1ResourcePolicies) error {
 }
 
 func workloadScalingPolicyImporter(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
-	ids := strings.Split(d.Id(), "/")
-	if len(ids) != 2 || ids[0] == "" || ids[1] == "" {
+	clusterID, nameOrID, found := strings.Cut(d.Id(), "/")
+	if !found {
 		return nil, fmt.Errorf("expected import id with format: <cluster_id>/<scaling_policy name or id>, got: %q", d.Id())
 	}
 
-	clusterID, nameOrID := ids[0], ids[1]
 	if err := d.Set(FieldClusterID, clusterID); err != nil {
-		return nil, fmt.Errorf("setting cluster nameOrID: %w", err)
+		return nil, fmt.Errorf("setting cluster ID: %w", err)
 	}
+	d.SetId(nameOrID)
 
 	// Return if scaling policy ID provided.
 	if _, err := uuid.Parse(nameOrID); err == nil {
-		d.SetId(nameOrID)
 		return []*schema.ResourceData{d}, nil
 	}
 
 	// Find scaling policy ID by name.
 	client := meta.(*ProviderConfig).api
-	resp, err := client.WorkloadOptimizationAPIListWorkloadScalingPoliciesWithResponse(ctx, clusterID)
-	if err := sdk.CheckOKResponse(resp, err); err != nil {
+	policy, err := getWorkloadScalingPolicyByName(ctx, client, clusterID, nameOrID)
+	if err != nil {
 		return nil, err
 	}
 
-	for _, sp := range resp.JSON200.Items {
-		if sp.Name == nameOrID {
-			d.SetId(sp.Id)
-			return []*schema.ResourceData{d}, nil
-		}
-	}
-
-	return nil, fmt.Errorf("failed to find workload scaling policy with the following name: %v", nameOrID)
+	d.SetId(policy.Id)
+	return []*schema.ResourceData{d}, nil
 }
 
 func toWorkloadScalingPolicies(obj map[string]interface{}) sdk.WorkloadoptimizationV1ResourcePolicies {
```
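In practice the importer now accepts either form of the import ID; a minimal illustration (the cluster and policy UUIDs below are made up):

```shell
# By policy UUID: used as the resource ID directly.
terraform import castai_workload_scaling_policy.default ff4c2211-3511-4d95-b6de-2919fc3287a3/4f1b2c3d-5e6f-4a7b-8c9d-0e1f2a3b4c5d
# By policy name: resolved to an ID via getWorkloadScalingPolicyByName.
terraform import castai_workload_scaling_policy.default ff4c2211-3511-4d95-b6de-2919fc3287a3/default
```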
```diff
@@ -871,3 +878,17 @@ func toAntiAffinityMap(s *sdk.WorkloadoptimizationV1AntiAffinitySettings) []map[string]any {
 
 	return []map[string]any{m}
 }
+
+func getWorkloadScalingPolicyByName(ctx context.Context, client sdk.ClientWithResponsesInterface, clusterID, name string) (*sdk.WorkloadoptimizationV1WorkloadScalingPolicy, error) {
+	list, err := client.WorkloadOptimizationAPIListWorkloadScalingPoliciesWithResponse(ctx, clusterID)
+	if checkErr := sdk.CheckOKResponse(list, err); checkErr != nil {
+		return nil, checkErr
+	}
+
+	for _, sp := range list.JSON200.Items {
+		if sp.Name == name {
+			return &sp, nil
+		}
+	}
+	return nil, fmt.Errorf("policy with name %q not found", name)
+}
```
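One subtlety in the helper, shown again with a comment added (the comment is not part of the diff):

```go
for _, sp := range list.JSON200.Items {
	if sp.Name == name {
		// Returning immediately makes &sp safe even before Go 1.22, where sp
		// is a single loop variable reused across iterations: no further
		// iteration can overwrite the value the caller receives.
		return &sp, nil
	}
}
```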
115 changes: 113 additions & 2 deletions docs/resources/workload_scaling_policy.md

Some generated files are not rendered by default.

115 changes: 113 additions & 2 deletions templates/resources/workload_scaling_policy.md.tmpl
@@ -18,12 +18,117 @@ simultaneously or create custom policies with different settings and apply them

{{ .SchemaMarkdown | trimspace }}


## Importing

For each connected cluster, a default scaling policy is created. An existing scaling policy can be imported into the
Terraform state using the `terraform import` command or the [`import`](https://developer.hashicorp.com/terraform/language/import#syntax) block (recommended for Terraform 1.5.0+).

The `import` block is the simpler and more convenient of the two options.

### Import using `import` block

#### Import a single scaling policy

1. Create an `import.tf` file with the following content:
```tf
import {
  to = castai_workload_scaling_policy.default
  id = "<cluster_id>/<policy_id_or_name>" # e.g. "ff4c2211-3511-4d95-b6de-2919fc3287a3/default"
}
```

2. Run the `terraform plan` command to generate the configuration and the import plan:

```shell
terraform plan -out=import.plan -var-file=tf.vars -generate-config-out=generated.tf
```

3. Review the `generated.tf` file and ensure the imported scaling policy is correct. Terraform may generate this file with zero values for certain configuration parameters, so adjust them as needed.

For example:

```hcl
cpu {
  look_back_period_seconds = 0
}
```

4. Apply the import plan:

```shell
terraform apply "import.plan"
```

#### Import multiple scaling policies

To import multiple scaling policies, you need to know the cluster IDs and the policy names. The `for_each` argument
cannot be used when generating configuration, so you have to define the policy properties yourself, or
[import a single policy](#import-a-single-scaling-policy) first and use it as a template for the other policies.

> [!NOTE]
> The example below assumes that you want to import the "default" scaling policy for multiple clusters. To import
> scaling policies with different names, adjust the `id` parameter in the `import` block accordingly.

1. Create the `import.tf` file with the following content:

```tf
locals {
  policies = {
    "<cluster_name>" = "<cluster_id>"
    "<cluster_name>" = "<cluster_id>"
    "<cluster_name>" = "<cluster_id>"
  }
}

import {
  for_each = local.policies
  to       = castai_workload_scaling_policy.default[each.key]
  id       = "${each.value}/default"
}

resource "castai_workload_scaling_policy" "default" {
  for_each          = local.policies
  cluster_id        = each.value
  apply_type        = "IMMEDIATE"
  management_option = "READ_ONLY"
  name              = "default"
  cpu {
    apply_threshold          = 0.1
    args                     = ["0.80"]
    function                 = "QUANTILE"
    look_back_period_seconds = 86400
    min                      = 0.01
  }
  memory {
    apply_threshold          = 0.1
    args                     = []
    function                 = "MAX"
    look_back_period_seconds = 86400
    min                      = 10
    overhead                 = 0.1
  }
}
```

2. Run the `terraform plan` command and review the import plan:

```shell
terraform plan -out=import.plan -var-file=tf.vars
```

3. Apply the import plan:

```shell
terraform apply "import.plan"
```

### Import using the `terraform import` command

You can use the `terraform import` command to import an existing scaling policy into the Terraform state.

To import a resource, first write a resource block for it in your configuration, establishing the name by which
it will be known to Terraform:

```hcl
resource "castai_workload_scaling_policy" "services" {
  # ...
}
```

@@ -38,4 +143,10 @@

```shell
$ terraform import castai_workload_scaling_policy.services <cluster_id>/services
```
If you are using CAST AI Terraform modules, the import command will be slightly different:
```shell
$ terraform import 'module.castai-eks-cluster.castai_workload_scaling_policy.this["services"]' <cluster_id>/services
```

## Upsert scaling policy

The recommended approach is to [import](#importing) the scaling policy and then apply your changes to it.
If that is not possible, you can define the default policy resource yourself; the CAST AI Terraform provider
will update the existing default policy instead of returning a name-conflict error.
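
For example, a minimal sketch of such a definition (the `var.cluster_id` variable and the attribute values are illustrative, not the defaults CAST AI creates):

```tf
resource "castai_workload_scaling_policy" "default" {
  cluster_id        = var.cluster_id # illustrative; supply your cluster ID
  name              = "default"      # matches the existing default policy, so it is updated in place
  apply_type        = "IMMEDIATE"
  management_option = "READ_ONLY"

  cpu {
    function        = "QUANTILE"
    args            = ["0.80"]
    apply_threshold = 0.1
  }

  memory {
    function        = "MAX"
    overhead        = 0.1
    apply_threshold = 0.1
  }
}
```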