Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(object): getting a 403 when reading bucket should not exit the provider #2302

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 47 additions & 0 deletions scaleway/helpers_object.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,10 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
awspolicy "github.com/hashicorp/awspolicyequivalence"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/scaleway/scaleway-sdk-go/scw"
Expand Down Expand Up @@ -590,3 +592,48 @@ func normalizeOwnerID(id *string) *string {

return &tab[0]
}

// addReadBucketErrorDiagnostic classifies an error returned while reading a
// bucket sub-resource (acl, tags, versioning, ...) and appends the matching
// diagnostic to diags.
//
// Parameters:
//   - diags: diagnostics slice to append to (mutated in place).
//   - err: the S3 API error to classify.
//   - resource: human-readable label of the sub-resource being read; also used
//     as the key into the attribute-path table below.
//   - awsResourceNotFoundCode: the per-resource "not found" S3 error code
//     (may be empty when the resource has no such code).
//
// Returns:
//   - bucketFound: false only when the bucket itself is gone (404), in which
//     case the caller should remove the resource from state.
//   - resourceFound: false when the bucket exists but this sub-resource does
//     not (e.g. no tag set configured).
func addReadBucketErrorDiagnostic(diags *diag.Diagnostics, err error, resource string, awsResourceNotFoundCode string) (bucketFound bool, resourceFound bool) {
	switch {
	case isS3Err(err, s3.ErrCodeNoSuchBucket, ""):
		// 404 on the bucket itself: warn and signal removal from state.
		*diags = append(*diags, diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  "Bucket not found",
			Detail:   "Got 404 error while reading bucket, removing from state",
		})
		return false, false

	case isS3Err(err, awsResourceNotFoundCode, ""):
		// Bucket exists but this particular sub-resource is not configured.
		return true, false

	case isS3Err(err, ErrCodeAccessDenied, ""):
		// 403: surface a warning instead of failing the whole read, so a
		// restrictive bucket policy does not break the provider.
		d := diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  fmt.Sprintf("Cannot read bucket %s: Forbidden", resource),
			Detail:   fmt.Sprintf("Got 403 error while reading bucket %s, please check your IAM permissions and your bucket policy", resource),
		}

		// Maps the human-readable resource label to its schema attribute name
		// so the warning is attached to the right field.
		attributes := map[string]string{
			"acl":                       "acl",
			"object lock configuration": "object_lock_enabled",
			"objects":                   "", // bucket contents have no dedicated schema attribute
			"tags":                      "tags",
			"CORS configuration":        "cors_rule",
			"versioning":                "versioning",
			"lifecycle configuration":   "lifecycle_rule",
		}
		// Only attach a path for a non-empty attribute name: the "objects"
		// entry is deliberately blank, and cty.GetAttrPath("") would produce
		// a meaningless empty-name attribute step.
		if attributeName, ok := attributes[resource]; ok && attributeName != "" {
			d.AttributePath = cty.GetAttrPath(attributeName)
		}

		*diags = append(*diags, d)
		return true, true

	default:
		// Any other error is fatal for the read; report it as an error
		// diagnostic but keep the resource in state.
		*diags = append(*diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  fmt.Sprintf("couldn't read bucket %s: %s", resource, err),
		})
		return true, true
	}
}
60 changes: 35 additions & 25 deletions scaleway/resource_object_bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@ import (
"log"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/aws-sdk-go-base/tfawserr"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
Expand Down Expand Up @@ -452,14 +450,18 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
return diag.FromErr(err)
}

var diags diag.Diagnostics

_ = d.Set("name", bucketName)
_ = d.Set("region", region)

acl, err := s3Client.GetBucketAclWithContext(ctx, &s3.GetBucketAclInput{
Bucket: aws.String(bucketName),
})
if err != nil {
return diag.FromErr(fmt.Errorf("couldn't read bucket acl: %s", err))
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "acl", ""); !bucketFound {
return diags
}
}
_ = d.Set("project_id", normalizeOwnerID(acl.Owner.ID))

Expand All @@ -468,15 +470,13 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
Bucket: aws.String(bucketName),
})
if err != nil {
switch {
case isS3Err(err, ErrCodeObjectLockConfigurationNotFoundError, ""):
_ = d.Set("object_lock_enabled", false)
case isS3Err(err, s3.ErrCodeNoSuchBucket, ""):
tflog.Error(ctx, fmt.Sprintf("Bucket %q was not found - removing from state!", bucketName))
bucketFound, objectLockFound := addReadBucketErrorDiagnostic(&diags, err, "object lock configuration", ErrCodeObjectLockConfigurationNotFoundError)
if !bucketFound {
d.SetId("")
return nil
default:
return diag.FromErr(fmt.Errorf("couldn't read bucket: %s", err))
return diags
}
if !objectLockFound {
_ = d.Set("object_lock_enabled", false)
}
} else if objectLockConfiguration.ObjectLockConfiguration != nil {
_ = d.Set("object_lock_enabled", true)
Expand All @@ -495,12 +495,10 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
Bucket: scw.StringPtr(bucketName),
})
if err != nil {
if s3err, ok := err.(awserr.Error); ok && s3err.Code() == s3.ErrCodeNoSuchBucket {
tflog.Error(ctx, fmt.Sprintf("Bucket %q was not found - removing from state!", bucketName))
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "objects", ""); !bucketFound {
d.SetId("")
return nil
return diags
}
return diag.FromErr(fmt.Errorf("couldn't read bucket: %s", err))
}

var tagsSet []*s3.Tag
Expand All @@ -509,8 +507,9 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
Bucket: scw.StringPtr(bucketName),
})
if err != nil {
if s3err, ok := err.(awserr.Error); !ok || s3err.Code() != ErrCodeNoSuchTagSet {
return diag.FromErr(fmt.Errorf("couldn't read tags from bucket: %s", err))
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "tags", ErrCodeNoSuchTagSet); !bucketFound {
d.SetId("")
return diags
}
} else {
tagsSet = tagsResponse.TagSet
Expand All @@ -525,9 +524,11 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
corsResponse, err := s3Client.GetBucketCorsWithContext(ctx, &s3.GetBucketCorsInput{
Bucket: scw.StringPtr(bucketName),
})

if err != nil && !isS3Err(err, ErrCodeNoSuchCORSConfiguration, "") {
return diag.FromErr(fmt.Errorf("error getting S3 Bucket CORS configuration: %s", err))
if err != nil {
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "CORS configuration", ErrCodeNoSuchCORSConfiguration); !bucketFound {
d.SetId("")
return diags
}
}

_ = d.Set("cors_rule", flattenBucketCORS(corsResponse))
Expand All @@ -537,16 +538,22 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
Bucket: scw.StringPtr(bucketName),
})
if err != nil {
return diag.FromErr(err)
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "versioning", ""); !bucketFound {
d.SetId("")
return diags
}
}
_ = d.Set("versioning", flattenObjectBucketVersioning(versioningResponse))

// Read the lifecycle configuration
lifecycle, err := s3Client.GetBucketLifecycleConfigurationWithContext(ctx, &s3.GetBucketLifecycleConfigurationInput{
Bucket: scw.StringPtr(bucketName),
})
if err != nil && !tfawserr.ErrMessageContains(err, ErrCodeNoSuchLifecycleConfiguration, "") {
return diag.FromErr(err)
if err != nil {
if bucketFound, _ := addReadBucketErrorDiagnostic(&diags, err, "lifecycle configuration", ErrCodeNoSuchLifecycleConfiguration); !bucketFound {
d.SetId("")
return diags
}
}

lifecycleRules := make([]map[string]interface{}, 0)
Expand Down Expand Up @@ -632,10 +639,13 @@ func resourceScalewayObjectBucketRead(ctx context.Context, d *schema.ResourceDat
}
}
if err := d.Set("lifecycle_rule", lifecycleRules); err != nil {
return diag.FromErr(fmt.Errorf("error setting lifecycle_rule: %s", err))
return append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("error setting lifecycle_rule: %s", err),
})
}

return nil
return diags
}

func resourceScalewayObjectBucketDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
Expand Down
Loading