diff --git a/api/v1/condition.go b/api/v1/condition.go
index e1b44296..686e255e 100644
--- a/api/v1/condition.go
+++ b/api/v1/condition.go
@@ -2,6 +2,7 @@ package v1
 
 import (
 	"fmt"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -100,6 +101,19 @@ func ReconcileErrorCondition(err error) metav1.Condition {
 	)
 }
 
+// ReconcileSyncErrorCondition returns a Sync=False condition describing the
+// given reconciliation error.
+func ReconcileSyncErrorCondition(err error) metav1.Condition {
+	message := "Reconciliation encountered an issue"
+	if err != nil {
+		message = fmt.Sprintf("%s: %v", message, err)
+	}
+	return CreateCondition(
+		ConditionSync,
+		metav1.ConditionFalse,
+		ReasonUnavailable,
+		message,
+	)
+}
+
 func ReconcileSuccessCondition() metav1.Condition {
 	return CreateCondition(
 		ConditionReady,
diff --git a/cmd/main.go b/cmd/main.go
index 94960e09..4fc27c5f 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 
 import (
+	"context"
 	"crypto/tls"
 	"flag"
 	"fmt"
@@ -40,12 +41,12 @@ import (
 	"github.com/infobloxopen/db-controller/internal/controller"
 	"github.com/infobloxopen/db-controller/internal/metrics"
 	mutating "github.com/infobloxopen/db-controller/internal/webhook"
+	webhookpersistancev1 "github.com/infobloxopen/db-controller/internal/webhook/v1"
 	"github.com/infobloxopen/db-controller/pkg/config"
 	"github.com/infobloxopen/db-controller/pkg/databaseclaim"
 	"github.com/infobloxopen/db-controller/pkg/rdsauth"
 	"github.com/infobloxopen/db-controller/pkg/roleclaim"
-	webhookpersistancev1 "github.com/infobloxopen/db-controller/internal/webhook/v1"
 	// +kubebuilder:scaffold:imports
 	crossplanerdsv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
 	crossplanegcpv1beta2 "github.com/upbound/provider-gcp/apis/alloydb/v1beta2"
@@ -75,6 +76,7 @@ func main() {
 	var probeAddr string
 	var secureMetrics bool
 	var enableHTTP2 bool
+	var enableLabelPropagation bool
 	var tlsOpts []func(*tls.Config)
 	flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
 		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
@@ -87,6 +89,8 @@ func main() {
 		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
 	flag.BoolVar(&enableHTTP2, "enable-http2", false,
 		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
+	flag.BoolVar(&enableLabelPropagation, "enable-label-propagation", false,
+		"Enable the propagation of DatabaseClaim labels to DBInstance objects")
 
 	var class string
 	var configFile string
@@ -237,7 +241,6 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "DbRoleClaim")
 		os.Exit(1)
 	}
-
 	if err = webhookpersistancev1.SetupDatabaseClaimWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseClaim")
 		os.Exit(1)
@@ -247,6 +250,13 @@ func main() {
 		setupLog.Error(err, "unable to create webhook", "webhook", "DbRoleClaim")
 		os.Exit(1)
 	}
+	if err := (&controller.DBInstanceStatusReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+		// StatusManager must be set; Reconcile dereferences it when it
+		// updates the DatabaseClaim status.
+		StatusManager: databaseclaim.NewStatusManager(mgr.GetClient(), ctlConfig),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "DBInstanceStatus")
+		os.Exit(1)
+	}
 	// +kubebuilder:scaffold:builder
 
@@ -289,9 +299,22 @@ func main() {
 
 	ctx := ctrl.SetupSignalHandler()
 	go metrics.StartUpdater(ctx, mgr.GetClient())
 
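+	// Note: mgr.Start blocks until the manager exits, so any background work
+	// that should run alongside the manager must be started first.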
+	// Start label propagation logic if enabled.
+	if enableLabelPropagation {
+		setupLog.Info("starting label propagation for DBInstances")
+		go func() {
+			if err := controller.SyncDBInstances(context.Background(), ctlConfig, mgr.GetClient(), setupLog); err != nil {
+				setupLog.Error(err, "failed to propagate labels for dbinstances")
+			} else {
+				setupLog.Info("label propagation completed successfully")
+			}
+		}()
+	}
+
 	setupLog.Info("starting manager")
 	if err := mgr.Start(ctx); err != nil {
 		setupLog.Error(err, "problem running manager")
 		os.Exit(1)
 	}
 }
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 5ab7d1a0..5050e9b6 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -11,6 +11,14 @@ rules:
   verbs:
   - create
   - patch
+- apiGroups:
+  - database.aws.crossplane.io
+  resources:
+  - dbinstances
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - persistance.atlas.infoblox.com
   resources:
diff --git a/internal/controller/databaseclaim_controller_test.go b/internal/controller/databaseclaim_controller_test.go
index bc9da224..57504e8e 100644
--- a/internal/controller/databaseclaim_controller_test.go
+++ b/internal/controller/databaseclaim_controller_test.go
@@ -291,9 +291,8 @@ var _ = Describe("DatabaseClaim Controller", func() {
 			Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).NotTo(HaveOccurred())
 
 			resource.Labels = map[string]string{
-				"app.kubernetes.io/component": resource.Labels["app.kubernetes.io/component"],
-				"app.kubernetes.io/instance":  resource.Labels["app.kubernetes.io/instance"],
-				"app.kubernetes.io/name":      resource.Labels["app.kubernetes.io/name"],
+				"app.kubernetes.io/dbclaim-name":      resource.Name,
+				"app.kubernetes.io/dbclaim-namespace": resource.Namespace,
 			}
 			Expect(k8sClient.Update(ctx, resource)).To(Succeed())
diff --git a/internal/controller/dbinstance_labels.go b/internal/controller/dbinstance_labels.go
new file mode 100644
index 00000000..4e907463
--- /dev/null
+++ b/internal/controller/dbinstance_labels.go
@@ -0,0 +1,98 @@
+package controller
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-logr/logr"
+	v1 "github.com/infobloxopen/db-controller/api/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	crossplaneaws "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
+	"github.com/spf13/viper"
+)
+
+// SyncDBInstances ensures that each DBInstance carries the
+// app.kubernetes.io/dbclaim-name and app.kubernetes.io/dbclaim-namespace
+// labels and that they point at an existing DatabaseClaim.
+func SyncDBInstances(ctx context.Context, cfg *viper.Viper, kubeClient client.Client, logger logr.Logger) error {
+	logger.Info("starting synchronization of DBInstance labels")
+
+	// List all DBInstances.
+	var dbInstances crossplaneaws.DBInstanceList
+	if err := kubeClient.List(ctx, &dbInstances); err != nil {
+		return fmt.Errorf("error listing DBInstances: %w", err)
+	}
+
+	// Process each DBInstance.
+	for _, dbInstance := range dbInstances.Items {
+		instanceLogger := logger.WithValues("DBInstanceName", dbInstance.Name)
+		instanceLogger.Info("processing DBInstance")
+
+		// Extract dbclaim-name and dbclaim-namespace from the existing labels.
+		dbClaimName := dbInstance.Labels["app.kubernetes.io/dbclaim-name"]
+		dbClaimNamespace := dbInstance.Labels["app.kubernetes.io/dbclaim-namespace"]
+
+		// Update the labels if either is missing.
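+		// For example, an unlabeled DBInstance named "box-3-app-db" ends up
+		// labeled:
+		//   app.kubernetes.io/dbclaim-name: box-3-app-db
+		//   app.kubernetes.io/dbclaim-namespace: default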
+		if dbClaimName == "" || dbClaimNamespace == "" {
+			instanceLogger.Info("dbclaim-name or dbclaim-namespace labels are missing; proceeding to update")
+
+			// Fall back to the instance name and the "default" namespace, and
+			// record the fallback so the claim lookup below uses the same values.
+			dbClaimName = dbInstance.Name
+			dbClaimNamespace = "default"
+			newLabels := map[string]string{
+				"app.kubernetes.io/dbclaim-name":      dbClaimName,
+				"app.kubernetes.io/dbclaim-namespace": dbClaimNamespace,
+			}
+			if err := updateDBInstanceLabels(ctx, kubeClient, &dbInstance, newLabels, instanceLogger); err != nil {
+				instanceLogger.Error(err, "failed to update labels for DBInstance")
+				continue
+			}
+		} else {
+			instanceLogger.Info("DBInstance has valid dbclaim-name and dbclaim-namespace labels; no update necessary")
+		}
+
+		// Verify that the referenced DatabaseClaim exists.
+		var dbClaim v1.DatabaseClaim
+		if err := kubeClient.Get(ctx, client.ObjectKey{Name: dbClaimName, Namespace: dbClaimNamespace}, &dbClaim); err != nil {
+			instanceLogger.Error(err, "DatabaseClaim not found for DBInstance")
+			return fmt.Errorf("DatabaseClaim not found for DBInstance %s: %w", dbInstance.Name, err)
+		}
+
+		instanceLogger.Info("labels verified successfully")
+	}
+
+	logger.Info("synchronization of DBInstance labels completed successfully")
+	return nil
+}
+
+// updateDBInstanceLabels updates the labels of a DBInstance while preserving
+// existing ones.
+func updateDBInstanceLabels(ctx context.Context, kubeClient client.Client, dbInstance *crossplaneaws.DBInstance, newLabels map[string]string, logger logr.Logger) error {
+	logger.Info("starting update of DBInstance labels")
+
+	if dbInstance.Labels == nil {
+		logger.Info("DBInstance has no labels; initializing")
+		dbInstance.Labels = make(map[string]string)
+	}
+
+	updated := false
+
+	// Add or update the desired labels.
+	for key, value := range newLabels {
+		if oldValue, exists := dbInstance.Labels[key]; exists && oldValue == value {
+			continue
+		}
+		dbInstance.Labels[key] = value
+		updated = true
+		logger.Info("label added or updated", "key", key, "value", value)
+	}
+
+	if !updated {
+		logger.Info("no label updates required for DBInstance")
+		return nil
+	}
+
+	// Persist the updated labels.
+	logger.Info("applying updated labels to DBInstance", "updatedLabels", dbInstance.Labels)
+	if err := kubeClient.Update(ctx, dbInstance); err != nil {
+		return fmt.Errorf("error updating DBInstance labels: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/controller/dbinstance_labels_test.go b/internal/controller/dbinstance_labels_test.go
new file mode 100644
index 00000000..3c4f5f7d
--- /dev/null
+++ b/internal/controller/dbinstance_labels_test.go
@@ -0,0 +1,204 @@
+package controller
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
+	persistencev1 "github.com/infobloxopen/db-controller/api/v1"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/spf13/viper"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
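+// The specs below exercise SyncDBInstances end to end against envtest. A
+// minimal invocation, assuming a populated scheme and a running API server,
+// looks like:
+//
+//	err := SyncDBInstances(ctx, viper.New(), k8sClient, logger)
+//
+// The viper argument is currently unused by the sync logic itself.
+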
+// strPtr returns a pointer to the given string.
+func strPtr(s string) *string {
+	return &s
+}
+
+var _ = Describe("DBInstance Labels Management with envtest", func() {
+	var (
+		testEnv    *envtest.Environment
+		k8sClient  client.Client
+		testLogger = zap.New(zap.UseDevMode(true))
+		ctx        context.Context
+		cancel     context.CancelFunc
+	)
+
+	BeforeEach(func() {
+		ctx, cancel = context.WithCancel(context.Background())
+		testEnv = &envtest.Environment{
+			CRDDirectoryPaths: []string{
+				filepath.Join("..", "..", "config", "crd", "bases"),
+				filepath.Join("..", "..", "test", "crd"),
+			},
+			ErrorIfCRDPathMissing: true,
+		}
+
+		cfg, err := testEnv.Start()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cfg).NotTo(BeNil())
+
+		k8sClient, err = client.New(cfg, client.Options{})
+		Expect(err).NotTo(HaveOccurred())
+		Expect(k8sClient).NotTo(BeNil())
+
+		err = v1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = persistencev1.AddToScheme(scheme.Scheme)
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		cancel()
+		Expect(testEnv.Stop()).To(Succeed())
+	})
+
+	It("Should create or update dbclaim-name and dbclaim-namespace labels in DBInstance", func() {
+		// If the DBInstance is missing the labels, SyncDBInstances falls back
+		// to the instance name and the "default" namespace, so a DatabaseClaim
+		// named "test-dbinstance" must exist for the lookup to succeed.
+		dbClaim := &persistencev1.DatabaseClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "test-dbinstance",
+				Namespace: "default",
+			},
+			Spec: persistencev1.DatabaseClaimSpec{
+				DatabaseName: "sample-app",
+				SecretName:   "test-secret",
+			},
+		}
+		Expect(k8sClient.Create(ctx, dbClaim)).To(Succeed())
+
+		dbInstance := &v1alpha1.DBInstance{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-dbinstance",
+			},
+			Spec: v1alpha1.DBInstanceSpec{
+				ForProvider: v1alpha1.DBInstanceParameters{
+					DBInstanceClass: strPtr("db.t3.micro"),
+					Engine:          strPtr("postgres"),
+				},
+			},
+		}
+		Expect(k8sClient.Create(ctx, dbInstance)).To(Succeed())
+
+		viperInstance := viper.New()
+		viperInstance.Set("env", "test-env")
+
+		// Execute the synchronization function.
+		err := SyncDBInstances(ctx, viperInstance, k8sClient, testLogger)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Check that the labels were added.
+		updatedDBInstance := &v1alpha1.DBInstance{}
+		Eventually(func() map[string]string {
+			_ = k8sClient.Get(ctx, client.ObjectKey{Name: "test-dbinstance"}, updatedDBInstance)
+			return updatedDBInstance.Labels
+		}, "10s", "1s").Should(HaveKeyWithValue("app.kubernetes.io/dbclaim-name", "test-dbinstance"))
+		Eventually(func() map[string]string {
+			_ = k8sClient.Get(ctx, client.ObjectKey{Name: "test-dbinstance"}, updatedDBInstance)
+			return updatedDBInstance.Labels
+		}, "10s", "1s").Should(HaveKeyWithValue("app.kubernetes.io/dbclaim-namespace", "default"))
+	})
+
+	It("Should not modify DBInstance if dbclaim-name and dbclaim-namespace already exist", func() {
+		// The labels "existing-dbclaim"/"existing-namespace" are predefined on
+		// the DBInstance below; to avoid a "not found" error during the claim
+		// lookup, a DatabaseClaim with that name and namespace is created first.
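+		// envtest runs a real API server, so the namespace itself must exist
+		// before namespaced objects can be created in it.
+		ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "existing-namespace"}}
+		Expect(k8sClient.Create(ctx, ns)).To(Succeed())
+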
+		existingDBClaim := &persistencev1.DatabaseClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "existing-dbclaim",
+				Namespace: "existing-namespace",
+			},
+			Spec: persistencev1.DatabaseClaimSpec{
+				DatabaseName: "some-db",
+				SecretName:   "some-secret",
+			},
+		}
+		Expect(k8sClient.Create(ctx, existingDBClaim)).To(Succeed())
+
+		dbInstance := &v1alpha1.DBInstance{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-dbinstance-existing",
+				Labels: map[string]string{
+					"app.kubernetes.io/dbclaim-name":      "existing-dbclaim",
+					"app.kubernetes.io/dbclaim-namespace": "existing-namespace",
+					"existing.label":                      "unchanged",
+				},
+			},
+			Spec: v1alpha1.DBInstanceSpec{
+				ForProvider: v1alpha1.DBInstanceParameters{
+					DBInstanceClass: strPtr("db.t3.micro"),
+					Engine:          strPtr("postgres"),
+				},
+			},
+		}
+		Expect(k8sClient.Create(ctx, dbInstance)).To(Succeed())
+
+		viperInstance := viper.New()
+		viperInstance.Set("env", "test-env")
+
+		// Execute the synchronization function.
+		err := SyncDBInstances(ctx, viperInstance, k8sClient, testLogger)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Verify that the labels were not overwritten.
+		updatedDBInstance := &v1alpha1.DBInstance{}
+		Eventually(func() map[string]string {
+			_ = k8sClient.Get(ctx, client.ObjectKey{Name: "test-dbinstance-existing"}, updatedDBInstance)
+			return updatedDBInstance.Labels
+		}, "10s", "1s").Should(HaveKeyWithValue("app.kubernetes.io/dbclaim-name", "existing-dbclaim"))
+		Eventually(func() map[string]string {
+			_ = k8sClient.Get(ctx, client.ObjectKey{Name: "test-dbinstance-existing"}, updatedDBInstance)
+			return updatedDBInstance.Labels
+		}, "10s", "1s").Should(HaveKeyWithValue("app.kubernetes.io/dbclaim-namespace", "existing-namespace"))
+
+		// Ensure that "existing.label" was preserved.
+		Eventually(func() map[string]string {
+			_ = k8sClient.Get(ctx, client.ObjectKey{Name: "test-dbinstance-existing"}, updatedDBInstance)
+			return updatedDBInstance.Labels
+		}, "10s", "1s").Should(HaveKeyWithValue("existing.label", "unchanged"))
+	})
+
+	It("Should return an error if DatabaseClaim is missing", func() {
+		// No labels are set here, so SyncDBInstances backfills
+		// "dbclaim-name = test-dbinstance-missing-claim" and
+		// "dbclaim-namespace = default", then tries to fetch a DatabaseClaim
+		// with that name and namespace, which does not exist.
+		dbInstance := &v1alpha1.DBInstance{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-dbinstance-missing-claim",
+			},
+			Spec: v1alpha1.DBInstanceSpec{
+				ForProvider: v1alpha1.DBInstanceParameters{
+					DBInstanceClass: strPtr("db.t3.micro"),
+					Engine:          strPtr("postgres"),
+				},
+			},
+		}
+		Expect(k8sClient.Create(ctx, dbInstance)).To(Succeed())
+
+		viperInstance := viper.New()
+		viperInstance.Set("env", "test-env")
+
+		// Expect an error, since the DatabaseClaim does not exist.
+		err := SyncDBInstances(ctx, viperInstance, k8sClient, testLogger)
+		Expect(err).To(HaveOccurred())
+
+		// Verify the actual error message rather than a generic "labels missing" check.
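+		// The substring below comes from the wrapped error returned by the
+		// DatabaseClaim lookup inside SyncDBInstances.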
+		Expect(err.Error()).To(ContainSubstring("DatabaseClaim not found for DBInstance"))
+	})
+})
diff --git a/internal/controller/dbinstance_status_controller.go b/internal/controller/dbinstance_status_controller.go
new file mode 100644
index 00000000..383fb745
--- /dev/null
+++ b/internal/controller/dbinstance_status_controller.go
@@ -0,0 +1,162 @@
+package controller
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	crossplaneaws "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+	"github.com/go-logr/logr"
+	v1 "github.com/infobloxopen/db-controller/api/v1"
+	"github.com/infobloxopen/db-controller/pkg/databaseclaim"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// DBInstanceStatusReconciler reconciles the status of DBInstance resources
+// into their owning DatabaseClaims.
+type DBInstanceStatusReconciler struct {
+	client.Client
+	Scheme        *runtime.Scheme
+	StatusManager *databaseclaim.StatusManager
+}
+
+// RBAC markers
+// +kubebuilder:rbac:groups=database.aws.crossplane.io,resources=dbinstances,verbs=get;list;watch
+// +kubebuilder:rbac:groups=persistance.atlas.infoblox.com,resources=databaseclaims,verbs=get;list;watch
+// +kubebuilder:rbac:groups=persistance.atlas.infoblox.com,resources=databaseclaims/status,verbs=get;update;patch
+
+// Reconcile reconciles a DBInstance with its corresponding DatabaseClaim.
+func (r *DBInstanceStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+	logger.Info("starting DBInstance status reconciliation", "DBInstance", req.NamespacedName)
+
+	dbInstance, err := r.getDBInstance(ctx, req)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	dbClaimRef, err := r.getDBClaimRefFromDBInstance(dbInstance, logger)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	dbClaim, err := r.getDatabaseClaim(ctx, dbClaimRef)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if err := r.updateDatabaseClaimStatus(ctx, dbInstance, dbClaim, logger); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	logger.Info("reconciliation complete", "DBInstance", req.NamespacedName)
+	return ctrl.Result{}, nil
+}
+
+// getDBInstance retrieves the DBInstance resource.
+func (r *DBInstanceStatusReconciler) getDBInstance(ctx context.Context, req ctrl.Request) (*crossplaneaws.DBInstance, error) {
+	var dbInstance crossplaneaws.DBInstance
+	if err := r.Get(ctx, req.NamespacedName, &dbInstance); err != nil {
+		return nil, fmt.Errorf("failed to get DBInstance: %w", err)
+	}
+	return &dbInstance, nil
+}
+
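+// The lookup below depends on the label contract written by the DatabaseClaim
+// reconcilers, for example:
+//
+//	labels:
+//	  app.kubernetes.io/dbclaim-name: default-dbclaim
+//	  app.kubernetes.io/dbclaim-namespace: default
+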
+// getDBClaimRefFromDBInstance extracts the DatabaseClaim reference from the
+// DBInstance labels.
+func (r *DBInstanceStatusReconciler) getDBClaimRefFromDBInstance(dbInstance *crossplaneaws.DBInstance, logger logr.Logger) (*types.NamespacedName, error) {
+	labels := dbInstance.GetLabels()
+	if labels == nil {
+		logger.Error(errors.New("missing labels"), "DBInstance has no labels", "DBInstance", dbInstance.Name)
+		return nil, fmt.Errorf("DBInstance %s has no labels", dbInstance.Name)
+	}
+
+	dbClaimName, nameExists := labels["app.kubernetes.io/dbclaim-name"]
+	dbClaimNamespace, namespaceExists := labels["app.kubernetes.io/dbclaim-namespace"]
+
+	if !nameExists || !namespaceExists {
+		err := errors.New("missing required labels app.kubernetes.io/dbclaim-name or app.kubernetes.io/dbclaim-namespace")
+		logger.Error(err, "DBInstance is missing required labels", "DBInstance", dbInstance.Name)
+		return nil, err
+	}
+
+	dbClaimNSName := types.NamespacedName{
+		Name:      dbClaimName,
+		Namespace: dbClaimNamespace,
+	}
+
+	return &dbClaimNSName, nil
+}
+
+// getDatabaseClaim retrieves the DatabaseClaim resource.
+func (r *DBInstanceStatusReconciler) getDatabaseClaim(ctx context.Context, dbClaimRef *types.NamespacedName) (*v1.DatabaseClaim, error) {
+	var dbClaim v1.DatabaseClaim
+	if err := r.Get(ctx, *dbClaimRef, &dbClaim); err != nil {
+		return nil, fmt.Errorf("failed to get DatabaseClaim: %w", err)
+	}
+	return &dbClaim, nil
+}
+
+// updateDatabaseClaimStatus propagates labels from the DatabaseClaim to the
+// DBInstance and updates the DatabaseClaim status based on the DBInstance
+// conditions.
+func (r *DBInstanceStatusReconciler) updateDatabaseClaimStatus(ctx context.Context, dbInstance *crossplaneaws.DBInstance, dbClaim *v1.DatabaseClaim, logger logr.Logger) error {
+	if len(dbInstance.Status.Conditions) == 0 {
+		logger.Info("DBInstance has no conditions", "DBInstance", dbInstance.Name)
+		return nil
+	}
+
+	var conditionSyncedAtProvider, conditionReadyAtProvider metav1.Condition
+
+	// Map the crossplane conditions from the DBInstance status onto
+	// DatabaseClaim conditions.
+	for _, condition := range dbInstance.Status.Conditions {
+		switch condition.Type {
+		case xpv1.TypeSynced:
+			conditionSyncedAtProvider = v1.CreateCondition(
+				v1.ConditionSync,
+				metav1.ConditionStatus(condition.Status),
+				string(condition.Reason),
+				condition.Message,
+			)
+		case xpv1.TypeReady:
+			conditionReadyAtProvider = v1.CreateCondition(
+				v1.ConditionReady,
+				metav1.ConditionStatus(condition.Status),
+				string(condition.Reason),
+				condition.Message,
+			)
+		}
+	}
+
+	newLabels := databaseclaim.PropagateLabels(dbClaim)
+	if err := updateDBInstanceLabels(ctx, r.Client, dbInstance, newLabels, logger); err != nil {
+		logger.Error(err, "failed to update labels for DBInstance", "DBInstance", dbInstance.Name)
+		return err
+	}
+
+	if conditionReadyAtProvider.Status == metav1.ConditionTrue && conditionSyncedAtProvider.Status == metav1.ConditionTrue {
+		if err := r.StatusManager.SetConditionAndUpdateStatus(ctx, dbClaim, v1.DatabaseReadyCondition()); err != nil {
+			logger.Error(err, "failed to set success condition in DatabaseClaim", "DatabaseClaim", dbClaim.Name)
+			return err
+		}
+		return nil
+	}
+
+	errorCondition := v1.ReconcileSyncErrorCondition(fmt.Errorf("%s: %s", conditionSyncedAtProvider.Reason, conditionSyncedAtProvider.Message))
+	if err := r.StatusManager.SetConditionAndUpdateStatus(ctx, dbClaim, errorCondition); err != nil {
+		logger.Error(err, "failed to set error condition in DatabaseClaim", "DatabaseClaim", dbClaim.Name)
+		return err
+	}
+	return nil
+}
+
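+// Note: every DBInstance event triggers a full reconcile here. If that proves
+// noisy, an event filter such as predicate.LabelChangedPredicate (from
+// sigs.k8s.io/controller-runtime/pkg/predicate) could be added to the builder
+// below via WithEventFilter; a possible refinement, not wired up here.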
+// SetupWithManager registers the controller with the Manager.
+func (r *DBInstanceStatusReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&crossplaneaws.DBInstance{}).
+		Complete(r)
+}
diff --git a/internal/controller/dbinstance_status_controller_test.go b/internal/controller/dbinstance_status_controller_test.go
new file mode 100644
index 00000000..c9638505
--- /dev/null
+++ b/internal/controller/dbinstance_status_controller_test.go
@@ -0,0 +1,176 @@
+package controller
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/ptr"
+	"github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+	persistancev1 "github.com/infobloxopen/db-controller/api/v1"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var _ = Describe("DBInstanceStatusReconciler", func() {
+	var (
+		dbInstance *v1alpha1.DBInstance
+		dbClaim    *persistancev1.DatabaseClaim
+	)
+
+	BeforeEach(func() {
+		// Create a DBInstance resource with the required labels.
+		dbInstance = &v1alpha1.DBInstance{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+				Name:      "default-dbinstance",
+				Labels: map[string]string{
+					"app.kubernetes.io/dbclaim-name":      "default-dbclaim",
+					"app.kubernetes.io/dbclaim-namespace": "default",
+				},
+			},
+			Spec: v1alpha1.DBInstanceSpec{
+				ForProvider: v1alpha1.DBInstanceParameters{
+					DBInstanceClass: ptr.String("db.t2.micro"),
+					Engine:          ptr.String("postgres"),
+				},
+			},
+		}
+		Expect(k8sClient.Create(context.Background(), dbInstance)).To(Succeed())
+
+		// Create a corresponding DatabaseClaim resource.
+		dbClaim = &persistancev1.DatabaseClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+				Name:      "default-dbclaim",
+			},
+			Spec: persistancev1.DatabaseClaimSpec{
+				DatabaseName: "sample-app",
+				SecretName:   "default-secret",
+			},
+		}
+		Expect(k8sClient.Create(context.Background(), dbClaim)).To(Succeed())
+	})
+
+	// Clean up resources after each test case.
+	AfterEach(func() {
+		Expect(k8sClient.Delete(context.Background(), dbInstance)).To(Succeed())
+		Expect(k8sClient.Delete(context.Background(), dbClaim)).To(Succeed())
+	})
+
+	It("Should reconcile and update the DatabaseClaim status to Synced=True", func() {
+		// Retrieve the created DBInstance and update its status.
+		dbInstanceObjKey := types.NamespacedName{
+			Namespace: "default",
+			Name:      "default-dbinstance",
+		}
+		var updatedInstance v1alpha1.DBInstance
+		Expect(k8sClient.Get(context.Background(), dbInstanceObjKey, &updatedInstance)).To(Succeed())
+
+		// Set the DBInstance conditions to indicate it is synced and ready.
+		updatedInstance.Status.Conditions = []v1.Condition{
+			{
+				Type:               v1.TypeSynced,
+				Status:             corev1.ConditionTrue,
+				Reason:             v1.ReasonAvailable,
+				Message:            "DBInstance is synced",
+				LastTransitionTime: metav1.Now(),
+			},
+			{
+				Type:               v1.TypeReady,
+				Status:             corev1.ConditionTrue,
+				Reason:             v1.ReasonAvailable,
+				Message:            "DBInstance is ready",
+				LastTransitionTime: metav1.Now(),
+			},
+		}
+		Expect(k8sClient.Status().Update(context.Background(), &updatedInstance)).To(Succeed())
+
+		// Trigger the reconcile loop.
+		req := reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: "default",
+				Name:      "default-dbinstance",
+			},
+		}
+		_, err := statusControllerReconciler.Reconcile(context.Background(), req)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Retrieve the updated DatabaseClaim and check its status.
+		dbClaimObjKey := types.NamespacedName{
+			Namespace: "default",
+			Name:      "default-dbclaim",
+		}
+		var updatedClaim persistancev1.DatabaseClaim
+		Expect(k8sClient.Get(context.Background(), dbClaimObjKey, &updatedClaim)).To(Succeed())
+
+		// Verify that the Synced condition is set to True.
+		condition := FindCondition(updatedClaim.Status.Conditions, "Synced")
+		Expect(condition).NotTo(BeNil())
+		Expect(condition.Status).To(Equal(metav1.ConditionTrue))
+		Expect(condition.Reason).To(Equal("Available"))
+		Expect(condition.Message).To(Equal("Database is provisioned."))
+	})
+
+	It("Should update DatabaseClaim status to Synced=False", func() {
+		dbInstanceObjKey := types.NamespacedName{
+			Namespace: "default",
+			Name:      "default-dbinstance",
+		}
+		var updatedInstance v1alpha1.DBInstance
+		Expect(k8sClient.Get(context.Background(), dbInstanceObjKey, &updatedInstance)).To(Succeed())
+
+		updatedInstance.Status.Conditions = []v1.Condition{
+			{
+				Type:               v1.TypeSynced,
+				Status:             corev1.ConditionFalse,
+				Reason:             "SyncFailed",
+				Message:            "DBInstance synchronization failed",
+				LastTransitionTime: metav1.Now(),
+			},
+		}
+		Expect(k8sClient.Status().Update(context.Background(), &updatedInstance)).To(Succeed())
+
+		// Trigger the reconcile loop.
+		req := reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: "default",
+				Name:      "default-dbinstance",
+			},
+		}
+		_, err := statusControllerReconciler.Reconcile(context.Background(), req)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Retrieve the updated DatabaseClaim and check its status.
+		dbClaimObjKey := types.NamespacedName{
+			Namespace: "default",
+			Name:      "default-dbclaim",
+		}
+		var updatedClaim persistancev1.DatabaseClaim
+		Expect(k8sClient.Get(context.Background(), dbClaimObjKey, &updatedClaim)).To(Succeed())
+
+		// Verify that the Synced condition is set to False.
+		condition := FindCondition(updatedClaim.Status.Conditions, "Synced")
+		Expect(condition).NotTo(BeNil())
+		Expect(condition.Status).To(Equal(metav1.ConditionFalse))
+		Expect(condition.Reason).To(Equal("Unavailable"))
+		Expect(condition.Message).To(Equal("Reconciliation encountered an issue: SyncFailed: DBInstance synchronization failed"))
+	})
+})
+
+// FindCondition returns the condition with the given type from a list of
+// DatabaseClaim conditions.
+func FindCondition(conditions []metav1.Condition, condType string) *metav1.Condition {
+	for _, cond := range conditions {
+		if cond.Type == condType {
+			return &cond
+		}
+	}
+
+	// Fail the test if the condition is not found.
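+	// ginkgo's Fail aborts the spec by panicking, so the return statement
+	// below is unreachable and exists only to satisfy the compiler.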
+	Fail(fmt.Sprintf("Condition %s not found", condType))
+	return nil
+}
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index d09e14fc..a93655ce 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -19,6 +19,7 @@ package controller
 import (
 	"database/sql"
 	"fmt"
+	"os"
 	"path/filepath"
 	"runtime"
 	"testing"
@@ -54,6 +55,7 @@ var cfg *rest.Config
 var k8sClient client.Client
 var testEnv *envtest.Environment
 var controllerReconciler *DatabaseClaimReconciler
+var statusControllerReconciler *DBInstanceStatusReconciler
 var namespace string
 var logger logr.Logger
 var env = "testenv"
@@ -136,6 +138,7 @@ var _ = BeforeSuite(func() {
 		Password:  "postgres",
 		DockerTag: "15",
 	})
+	Expect(testdb.Ping()).To(Succeed(), "Database connection failed")
 	logger.Info("postgres_setup_took", "duration", time.Since(now))
 
 	// Mock table for testing migrations
@@ -151,10 +154,13 @@ var _ = BeforeSuite(func() {
 	By("setting up the database controller")
 	configPath, err := filepath.Abs(filepath.Join("..", "..", "cmd", "config", "config.yaml"))
 	Expect(err).NotTo(HaveOccurred())
+	_, err = os.Stat(configPath)
+	Expect(err).NotTo(HaveOccurred(), "Configuration file not found at path: %s", configPath)
 	viperCfg := config.NewConfig(configPath)
+	Expect(viperCfg).NotTo(BeNil(), "Failed to initialize Viper configuration")
 
 	// Used by kctlutils
 	viperCfg.Set("SERVICE_NAMESPACE", "default")
 	controllerReconciler = &DatabaseClaimReconciler{
 		Config: &databaseclaim.DatabaseClaimConfig{
 			Viper: viperCfg,
@@ -169,6 +175,11 @@ var _ = BeforeSuite(func() {
 	controllerReconciler.Config.Viper.Set("defaultMasterusername", "postgres")
 	controllerReconciler.Config.Viper.Set("defaultSslMode", "disable")
 
+	statusControllerReconciler = &DBInstanceStatusReconciler{
+		Client:        k8sClient,
+		Scheme:        k8sClient.Scheme(),
+		StatusManager: databaseclaim.NewStatusManager(k8sClient, viperCfg),
+	}
 })
 
 var _ = AfterSuite(func() {
diff --git a/pkg/databaseclaim/awsprovider.go b/pkg/databaseclaim/awsprovider.go
index 3f7f77cd..14b06f4e 100644
--- a/pkg/databaseclaim/awsprovider.go
+++ b/pkg/databaseclaim/awsprovider.go
@@ -242,7 +242,10 @@ func (r *DatabaseClaimReconciler) managePostgresDBInstanceAWS(ctx context.Contex
 	}
 
-	labels := propagateLabels(dbClaim.Labels)
+	labels := map[string]string{
+		"app.kubernetes.io/dbclaim-name":      dbClaim.Name,
+		"app.kubernetes.io/dbclaim-namespace": dbClaim.Namespace,
+	}
 
 	err = r.Client.Get(ctx, client.ObjectKey{
 		Name: dbHostName,
@@ -399,7 +402,10 @@ func (r *DatabaseClaimReconciler) manageAuroraDBInstance(ctx context.Context, re
 	dbClaim.Spec.Tags = r.configureBackupPolicy(dbClaim.Spec.BackupPolicy, dbClaim.Spec.Tags)
 
-	labels := propagateLabels(dbClaim.Labels)
+	labels := map[string]string{
+		"app.kubernetes.io/dbclaim-name":      dbClaim.Name,
+		"app.kubernetes.io/dbclaim-namespace": dbClaim.Namespace,
+	}
 
 	err = r.Client.Get(ctx, client.ObjectKey{
 		Name: dbHostName,
@@ -457,6 +463,12 @@ func (r *DatabaseClaimReconciler) manageAuroraDBInstance(ctx context.Context, re
 		return false, err
 	}
 
+	dbInstance.Spec.ForProvider.Tags = ReplaceOrAddTag(
+		DBClaimTags(dbClaim.Spec.Tags).DBTags(),
+		OperationalStatusTagKey,
+		OperationalStatusActiveValue,
+	)
+
 	if dbClaim.Spec.PreferredMaintenanceWindow != nil {
 		dbInstance.Spec.ForProvider.PreferredMaintenanceWindow = dbClaim.Spec.PreferredMaintenanceWindow
 	}
diff --git a/pkg/databaseclaim/gcpprovider.go b/pkg/databaseclaim/gcpprovider.go
index 7ae43cc1..ff00ebbf 100644
--- a/pkg/databaseclaim/gcpprovider.go
+++ b/pkg/databaseclaim/gcpprovider.go
@@ -176,7 +176,10 @@ func (r *DatabaseClaimReconciler) manageDBClusterGCP(ctx context.Context, reqInf
 	dbClaim.Spec.Tags = r.configureBackupPolicy(dbClaim.Spec.BackupPolicy, dbClaim.Spec.Tags)
 
-	labels := propagateLabels(dbClaim.Labels)
+	labels := map[string]string{
+		"app.kubernetes.io/dbclaim-name":      dbClaim.Name,
+		"app.kubernetes.io/dbclaim-namespace": dbClaim.Namespace,
+	}
 
 	err = r.Client.Get(ctx, client.ObjectKey{
 		Name: dbHostName,
@@ -305,7 +308,10 @@ func (r *DatabaseClaimReconciler) managePostgresDBInstanceGCP(ctx context.Contex
 	dbClaim.Spec.Tags = r.configureBackupPolicy(dbClaim.Spec.BackupPolicy, dbClaim.Spec.Tags)
 
-	labels := propagateLabels(dbClaim.Labels)
+	labels := map[string]string{
+		"app.kubernetes.io/dbclaim-name":      dbClaim.Name,
+		"app.kubernetes.io/dbclaim-namespace": dbClaim.Namespace,
+	}
 
 	err = r.Client.Get(ctx, client.ObjectKey{
 		Name: dbHostName,
diff --git a/pkg/databaseclaim/labels.go b/pkg/databaseclaim/labels.go
index 34652223..6ca7d34b 100644
--- a/pkg/databaseclaim/labels.go
+++ b/pkg/databaseclaim/labels.go
@@ -1,19 +1,37 @@
 package databaseclaim
 
-func propagateLabels(dbClaimLabels map[string]string) map[string]string {
-	keysToPropagate := []string{
+import v1 "github.com/infobloxopen/db-controller/api/v1"
+
+// PropagateLabels builds the label set for a DBInstance from a DatabaseClaim:
+// it carries over selected app.kubernetes.io labels from the claim and adds
+// the dbclaim-name and dbclaim-namespace back-references.
+func PropagateLabels(dbClaim *v1.DatabaseClaim) map[string]string {
+	// Labels from the DatabaseClaim that we want to preserve.
+	dbClaimSpecificLabels := []string{
 		"app.kubernetes.io/component",
 		"app.kubernetes.io/instance",
 		"app.kubernetes.io/name",
 	}
 
+	// DBInstance-specific labels that we want to add or update.
+	dbInstanceSpecificLabels := map[string]string{
+		"app.kubernetes.io/dbclaim-name":      dbClaim.Name,
+		"app.kubernetes.io/dbclaim-namespace": dbClaim.Namespace,
+	}
+
+	// Combine both sets of labels.
 	labels := make(map[string]string)
 
-	for _, key := range keysToPropagate {
-		if value, exists := dbClaimLabels[key]; exists {
+	// Carry over the selected labels from the DatabaseClaim.
+	for _, key := range dbClaimSpecificLabels {
+		if value, exists := dbClaim.Labels[key]; exists {
 			labels[key] = value
 		}
 	}
 
+	// Add or update the DBInstance-specific labels.
+	for key, value := range dbInstanceSpecificLabels {
+		labels[key] = value
+	}
+
 	return labels
 }