From d3020fb0fc497f04cd2fc1eda8d4d77ea06d2fe5 Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Wed, 29 Nov 2023 17:25:35 +0100 Subject: [PATCH 1/8] feat: import external Camel applications --- .../namespaced/operator-role-knative.yaml | 8 + pkg/apis/camel/v1/integration_types.go | 18 +- .../camel/v1/integration_types_support.go | 15 ++ pkg/cmd/operator/operator.go | 1 - pkg/controller/integration/initialize.go | 87 ++++++ .../integration/integration_controller.go | 173 ++++++++---- .../integration_controller_import.go | 249 ++++++++++++++++++ pkg/controller/integration/monitor.go | 54 +++- pkg/controller/integration/monitor_cronjob.go | 16 ++ .../integration/monitor_deployment.go | 13 + pkg/controller/integration/monitor_knative.go | 17 ++ .../integration/monitor_synthetic.go | 70 +++++ pkg/controller/integration/predicate.go | 37 +++ pkg/trait/camel.go | 3 +- pkg/trait/platform.go | 3 +- pkg/trait/trait.go | 71 ++++- 16 files changed, 770 insertions(+), 65 deletions(-) create mode 100644 pkg/controller/integration/integration_controller_import.go create mode 100644 pkg/controller/integration/monitor_synthetic.go diff --git a/config/rbac/namespaced/operator-role-knative.yaml b/config/rbac/namespaced/operator-role-knative.yaml index 3cba80931b..7e1d2f3492 100644 --- a/config/rbac/namespaced/operator-role-knative.yaml +++ b/config/rbac/namespaced/operator-role-knative.yaml @@ -35,6 +35,14 @@ rules: - patch - update - watch +- apiGroups: + - serving.knative.dev + resources: + - revisions + verbs: + - get + - list + - watch - apiGroups: - eventing.knative.dev resources: diff --git a/pkg/apis/camel/v1/integration_types.go b/pkg/apis/camel/v1/integration_types.go index 78dd40a8cd..9bcecaad2f 100644 --- a/pkg/apis/camel/v1/integration_types.go +++ b/pkg/apis/camel/v1/integration_types.go @@ -155,7 +155,13 @@ const ( IntegrationPhaseRunning IntegrationPhase = "Running" // IntegrationPhaseError --. IntegrationPhaseError IntegrationPhase = "Error" + // IntegrationPhaseImportMissing used when the application from which the Integration is imported has been deleted. + IntegrationPhaseImportMissing IntegrationPhase = "Application Missing" + // IntegrationPhaseCannotMonitor used when the application from which the Integration has not enough information to monitor its pods. + IntegrationPhaseCannotMonitor IntegrationPhase = "Cannot Monitor Pods" + // IntegrationConditionReady --. + IntegrationConditionReady IntegrationConditionType = "Ready" // IntegrationConditionKitAvailable --. IntegrationConditionKitAvailable IntegrationConditionType = "IntegrationKitAvailable" // IntegrationConditionPlatformAvailable --. @@ -178,10 +184,11 @@ const ( IntegrationConditionJolokiaAvailable IntegrationConditionType = "JolokiaAvailable" // IntegrationConditionProbesAvailable --. IntegrationConditionProbesAvailable IntegrationConditionType = "ProbesAvailable" - // IntegrationConditionReady --. - IntegrationConditionReady IntegrationConditionType = "Ready" // IntegrationConditionTraitInfo --. IntegrationConditionTraitInfo IntegrationConditionType = "TraitInfo" + // IntegrationConditionMonitoringPodsAvailable used to specify that the Pods generated are available for monitoring. + IntegrationConditionMonitoringPodsAvailable IntegrationConditionType = "MonitoringPodsAvailable" + // IntegrationConditionKitAvailableReason --. IntegrationConditionKitAvailableReason string = "IntegrationKitAvailable" // IntegrationConditionPlatformAvailableReason --. 
@@ -220,7 +227,8 @@ const ( IntegrationConditionJolokiaAvailableReason string = "JolokiaAvailable" // IntegrationConditionProbesAvailableReason --. IntegrationConditionProbesAvailableReason string = "ProbesAvailable" - + // IntegrationConditionMonitoringPodsAvailableReason used to specify that the Pods generated are available for monitoring. + IntegrationConditionMonitoringPodsAvailableReason string = "MonitoringPodsAvailable" // IntegrationConditionKnativeServiceReadyReason --. IntegrationConditionKnativeServiceReadyReason string = "KnativeServiceReady" // IntegrationConditionDeploymentReadyReason --. @@ -239,18 +247,18 @@ const ( IntegrationConditionRuntimeNotReadyReason string = "RuntimeNotReady" // IntegrationConditionErrorReason --. IntegrationConditionErrorReason string = "Error" - // IntegrationConditionInitializationFailedReason --. IntegrationConditionInitializationFailedReason string = "InitializationFailed" // IntegrationConditionUnsupportedLanguageReason --. IntegrationConditionUnsupportedLanguageReason string = "UnsupportedLanguage" - // IntegrationConditionKameletsAvailable --. IntegrationConditionKameletsAvailable IntegrationConditionType = "KameletsAvailable" // IntegrationConditionKameletsAvailableReason --. IntegrationConditionKameletsAvailableReason string = "KameletsAvailable" // IntegrationConditionKameletsNotAvailableReason --. IntegrationConditionKameletsNotAvailableReason string = "KameletsNotAvailable" + // IntegrationConditionImportingKindAvailableReason used (as false) if we're trying to import an unsupported kind. + IntegrationConditionImportingKindAvailableReason string = "ImportingKindAvailable" ) // IntegrationCondition describes the state of a resource at a certain point. diff --git a/pkg/apis/camel/v1/integration_types_support.go b/pkg/apis/camel/v1/integration_types_support.go index ef24e207ba..3342be76a6 100644 --- a/pkg/apis/camel/v1/integration_types_support.go +++ b/pkg/apis/camel/v1/integration_types_support.go @@ -25,8 +25,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// IntegrationLabel is used to tag k8s object created by a given Integration. const IntegrationLabel = "camel.apache.org/integration" +// IntegrationSyntheticLabel is used to tag k8s synthetic Integrations. +const IntegrationSyntheticLabel = "camel.apache.org/is-synthetic" + +// IntegrationImportedKindLabel specifies from what kind of resource an Integration was imported. +const IntegrationImportedKindLabel = "camel.apache.org/imported-from-kind" + +// IntegrationImportedNameLabel specifies from what resource an Integration was imported. +const IntegrationImportedNameLabel = "camel.apache.org/imported-from-name" + func NewIntegration(namespace string, name string) Integration { return Integration{ TypeMeta: metav1.TypeMeta{ @@ -283,6 +293,11 @@ func (in *Integration) SetReadyConditionError(err string) { in.SetReadyCondition(corev1.ConditionFalse, IntegrationConditionErrorReason, err) } +// IsSynthetic returns true for synthetic Integrations (non managed, likely imported from external deployments). +func (in *Integration) IsSynthetic() bool { + return in.Annotations[IntegrationSyntheticLabel] == "true" +} + // GetCondition returns the condition with the provided type. 
func (in *IntegrationStatus) GetCondition(condType IntegrationConditionType) *IntegrationCondition { for i := range in.Conditions { diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go index 04b5ea8b23..ab59ab638e 100644 --- a/pkg/cmd/operator/operator.go +++ b/pkg/cmd/operator/operator.go @@ -188,7 +188,6 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID selector := labels.NewSelector().Add(*hasIntegrationLabel) selectors := map[ctrl.Object]cache.ByObject{ - &corev1.Pod{}: {Label: selector}, &appsv1.Deployment{}: {Label: selector}, &batchv1.Job{}: {Label: selector}, &servingv1.Service{}: {Label: selector}, diff --git a/pkg/controller/integration/initialize.go b/pkg/controller/integration/initialize.go index a08dd28c66..ad8891647f 100644 --- a/pkg/controller/integration/initialize.go +++ b/pkg/controller/integration/initialize.go @@ -19,6 +19,7 @@ package integration import ( "context" + "fmt" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -53,6 +54,10 @@ func (action *initializeAction) CanHandle(integration *v1.Integration) bool { func (action *initializeAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) { action.L.Info("Initializing Integration") + if integration.Annotations[v1.IntegrationImportedNameLabel] != "" { + return action.importFromExternalApp(integration) + } + if _, err := trait.Apply(ctx, action.client, integration, nil); err != nil { integration.Status.Phase = v1.IntegrationPhaseError integration.SetReadyCondition(corev1.ConditionFalse, @@ -91,3 +96,85 @@ func (action *initializeAction) Handle(ctx context.Context, integration *v1.Inte return integration, nil } + +func (action *initializeAction) importFromExternalApp(integration *v1.Integration) (*v1.Integration, error) { + readyMessage := fmt.Sprintf( + "imported from %s %s", + integration.Annotations[v1.IntegrationImportedNameLabel], + integration.Annotations[v1.IntegrationImportedKindLabel], + ) + // We need to set the condition for which this Integration is imported (required later by monitoring) + integration.Status.SetConditions( + getCamelAppImportingCondition( + integration.Annotations[v1.IntegrationImportedKindLabel], + readyMessage, + )..., + ) + // If it's ready, then we can safely assume the integration is running + if integration.IsConditionTrue(v1.IntegrationConditionReady) { + integration.Status.Phase = v1.IntegrationPhaseRunning + } else { + integration.Status.Phase = v1.IntegrationPhaseError + } + + return integration, nil +} + +func getCamelAppImportingCondition(kind, message string) []v1.IntegrationCondition { + switch kind { + case "Deployment": + return []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionDeploymentAvailable, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionDeploymentAvailableReason, + Message: message, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionDeploymentReadyReason, + Message: message, + }, + } + case "CronJob": + return []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionCronJobAvailable, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionCronJobCreatedReason, + Message: message, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionDeploymentReadyReason, + Message: message, + }, + } + case "KnativeService": + return []v1.IntegrationCondition{ + { + Type: 
v1.IntegrationConditionKnativeServiceAvailable, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionKnativeServiceAvailableReason, + Message: message, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionKnativeServiceReadyReason, + Message: message, + }, + } + default: + return []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionFalse, + Reason: v1.IntegrationConditionImportingKindAvailableReason, + Message: fmt.Sprintf("Unsupported %s import kind", kind), + }, + } + } +} diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go index a70a8713b4..c3dcd30f40 100644 --- a/pkg/controller/integration/integration_controller.go +++ b/pkg/controller/integration/integration_controller.go @@ -27,12 +27,12 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/builder" ctrl "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -324,30 +324,47 @@ func add(ctx context.Context, mgr manager.Manager, c client.Client, r reconcile. // Evaluates to false if the object has been confirmed deleted return !e.DeleteStateUnknown }, - })). - // Watch for IntegrationKit phase transitioning to ready or error, and - // enqueue requests for any integration that matches the kit, in building - // or running phase. - Watches(&v1.IntegrationKit{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { - kit, ok := a.(*v1.IntegrationKit) - if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve integration list") - return []reconcile.Request{} - } + })) + // Watch for all the resources + watchIntegrationResources(c, b) + // Watch for the CronJob conditionally + if ok, err := kubernetes.IsAPIResourceInstalled(c, batchv1.SchemeGroupVersion.String(), reflect.TypeOf(batchv1.CronJob{}).Name()); ok && err == nil { + watchCronJobResources(c, b) + } + // Watch for the Knative Services conditionally + if ok, err := kubernetes.IsAPIResourceInstalled(c, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); err != nil { + return err + } else if ok { + if err = watchKnativeResources(ctx, c, b); err != nil { + return err + } + } - return integrationKitEnqueueRequestsFromMapFunc(ctx, c, kit) - })). + return b.Complete(r) +} + +func watchIntegrationResources(c client.Client, b *builder.Builder) { + // Watch for IntegrationKit phase transitioning to ready or error, and + // enqueue requests for any integration that matches the kit, in building + // or running phase. + b.Watches(&v1.IntegrationKit{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { + kit, ok := a.(*v1.IntegrationKit) + if !ok { + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve IntegrationKit") + return []reconcile.Request{} + } + return integrationKitEnqueueRequestsFromMapFunc(ctx, c, kit) + })). 
// Watch for IntegrationPlatform phase transitioning to ready and enqueue // requests for any integrations that are in phase waiting for platform Watches(&v1.IntegrationPlatform{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { p, ok := a.(*v1.IntegrationPlatform) if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to list integrations") + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve IntegrationPlatform") return []reconcile.Request{} } - return integrationPlatformEnqueueRequestsFromMapFunc(ctx, c, p) })). // Watch for Configmaps or Secret used in the Integrations for updates @@ -355,30 +372,29 @@ func add(ctx context.Context, mgr manager.Manager, c client.Client, r reconcile. handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { cm, ok := a.(*corev1.ConfigMap) if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve integration list") + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve Configmap") return []reconcile.Request{} } - return configmapEnqueueRequestsFromMapFunc(ctx, c, cm) })). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { secret, ok := a.(*corev1.Secret) if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve integration list") + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve Secret") return []reconcile.Request{} } - return secretEnqueueRequestsFromMapFunc(ctx, c, secret) })). - // Watch for the owned Deployments - Owns(&appsv1.Deployment{}, builder.WithPredicates(StatusChangedPredicate{})). - // Watch for the Integration Pods + // Watch for the Integration Pods belonging to managed Integrations Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { pod, ok := a.(*corev1.Pod) if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to list integration pods") + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve Pod") + return []reconcile.Request{} + } + if pod.Labels[v1.IntegrationLabel] == "" { return []reconcile.Request{} } return []reconcile.Request{ @@ -389,36 +405,90 @@ func add(ctx context.Context, mgr manager.Manager, c client.Client, r reconcile. }, }, } - })) + })). + // Watch for non managed Deployments (ie, imported) + Watches(&appsv1.Deployment{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { + deploy, ok := a.(*appsv1.Deployment) + if !ok { + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve Deployment") + return []reconcile.Request{} + } + return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelDeployment{deploy: deploy}) + }), + builder.WithPredicates(NonManagedObjectPredicate{}), + ). 
+ // Watch for the owned Deployments + Owns(&appsv1.Deployment{}, builder.WithPredicates(StatusChangedPredicate{})) +} - if ok, err := kubernetes.IsAPIResourceInstalled(c, batchv1.SchemeGroupVersion.String(), reflect.TypeOf(batchv1.CronJob{}).Name()); ok && err == nil { +func watchCronJobResources(c client.Client, b *builder.Builder) { + // Watch for non managed Deployments (ie, imported) + b.Watches(&batchv1.CronJob{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { + cron, ok := a.(*batchv1.CronJob) + if !ok { + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve CronJob") + return []reconcile.Request{} + } + return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelCronjob{cron: cron}) + }), + builder.WithPredicates(NonManagedObjectPredicate{}), + ). // Watch for the owned CronJobs - b.Owns(&batchv1.CronJob{}, builder.WithPredicates(StatusChangedPredicate{})) - } + Owns(&batchv1.CronJob{}, builder.WithPredicates(StatusChangedPredicate{})) +} - // Watch for the owned Knative Services conditionally - if ok, err := kubernetes.IsAPIResourceInstalled(c, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); err != nil { +func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Builder) error { + // Check for permission to watch the Knative Service resource + checkCtx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + if ok, err := kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil { return err } else if ok { - // Check for permission to watch the Knative Service resource - checkCtx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - if ok, err = kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil { - return err - } else if ok { - log.Info("KnativeService resources installed in the cluster. RBAC privileges assigned correctly, you can use Knative features.") - b.Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{})) - } else { - log.Info(` KnativeService resources installed in the cluster. However Camel K operator has not the required RBAC privileges. You can't use Knative features. - Make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for Camel K managed Knative Services.`) - } - } else { - log.Info(`KnativeService resources are not installed in the cluster. You can't use Knative features. If you install Knative Serving resources after the - Camel K operator, make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for - Camel K managed Knative Services.`) + // Watch for non managed Knative Service (ie, imported) + b.Watches(&servingv1.Service{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { + ksvc, ok := a.(*servingv1.Service) + if !ok { + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve KnativeService") + return []reconcile.Request{} + } + return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelKnativeService{ksvc: ksvc}) + }), + builder.WithPredicates(NonManagedObjectPredicate{}), + ). 
+ // We must watch also Revisions, since it's the object that really change when a Knative service scales up and down + Watches(&servingv1.Revision{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { + revision, ok := a.(*servingv1.Revision) + if !ok { + log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve KnativeService Revision") + return []reconcile.Request{} + } + ksvc := &servingv1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: servingv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: revision.Labels["serving.knative.dev/service"], + Namespace: revision.Namespace, + }, + } + err := c.Get(ctx, ctrl.ObjectKeyFromObject(ksvc), ksvc) + if err != nil { + // The revision does not belong to any managed (owned or imported) KnativeService, just discard + return []reconcile.Request{} + } + return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelKnativeService{ksvc: ksvc}) + }), + builder.WithPredicates(NonManagedObjectPredicate{}), + ). + // Watch for the owned CronJobs + Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{})) } - - return b.Complete(r) + return nil } var _ reconcile.Reconciler = &reconcileIntegration{} @@ -476,7 +546,12 @@ func (r *reconcileIntegration) Reconcile(ctx context.Context, request reconcile. NewPlatformSetupAction(), NewInitializeAction(), newBuildKitAction(), - NewMonitorAction(), + } + + if instance.IsSynthetic() { + actions = append(actions, NewMonitorSyntheticAction()) + } else { + actions = append(actions, NewMonitorAction()) } for _, a := range actions { diff --git a/pkg/controller/integration/integration_controller_import.go b/pkg/controller/integration/integration_controller_import.go new file mode 100644 index 0000000000..4031855097 --- /dev/null +++ b/pkg/controller/integration/integration_controller_import.go @@ -0,0 +1,249 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + "github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait" + "github.com/apache/camel-k/v2/pkg/client" + "github.com/apache/camel-k/v2/pkg/util/log" + "github.com/apache/camel-k/v2/pkg/util/patch" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// nonManagedCamelAppEnqueueRequestsFromMapFunc represent the function to discover the Integration which has to be woke up: it creates a synthetic +// Integration if the Integration does not exist. 
This is used to import external Camel applications. +func nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx context.Context, c client.Client, adp NonManagedCamelApplicationAdapter) []reconcile.Request { + if adp.GetIntegrationName() == "" { + return []reconcile.Request{} + } + it := v1.NewIntegration(adp.GetIntegrationNameSpace(), adp.GetIntegrationName()) + err := c.Get(ctx, ctrl.ObjectKeyFromObject(&it), &it) + if err != nil { + if k8serrors.IsNotFound(err) { + // We must perform this check to make sure the resource is not being deleted. + // In such case it makes no sense to create an Integration after it. + err := c.Get(ctx, ctrl.ObjectKeyFromObject(adp.GetAppObj()), adp.GetAppObj()) + if err != nil { + if k8serrors.IsNotFound(err) { + return []reconcile.Request{} + } + log.Errorf(err, "Some error happened while trying to get %s %s resource", adp.GetName(), adp.GetKind()) + } + createSyntheticIntegration(&it, adp) + target, err := patch.ApplyPatch(&it) + if err == nil { + err = c.Patch(ctx, target, ctrl.Apply, ctrl.ForceOwnership, ctrl.FieldOwner("camel-k-operator")) + if err != nil { + log.Errorf(err, "Some error happened while creating a synthetic Integration after %s %s resource", adp.GetName(), adp.GetKind()) + return []reconcile.Request{} + } + log.Infof( + "Created a synthetic Integration %s after %s %s", + it.GetName(), + adp.GetName(), + adp.GetKind(), + ) + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: it.Namespace, + Name: it.Name, + }, + }, + } + } + if err != nil { + log.Infof("Could not create Integration %s: %s", adp.GetIntegrationName(), err.Error()) + return []reconcile.Request{} + } + } + log.Errorf(err, "Could not get Integration %s", it.GetName()) + return []reconcile.Request{} + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: it.Namespace, + Name: it.Name, + }, + }, + } +} + +// createSyntheticIntegration set all required values for a synthetic Integration. +func createSyntheticIntegration(it *v1.Integration, adp NonManagedCamelApplicationAdapter) { + // We need to create a synthetic Integration + it.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: adp.GetName(), + v1.IntegrationImportedKindLabel: adp.GetKind(), + v1.IntegrationSyntheticLabel: "true", + }) + it.Spec = v1.IntegrationSpec{ + Traits: adp.GetTraits(), + } +} + +// NonManagedCamelApplicationAdapter represents a Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelApplicationAdapter interface { + // GetName returns the name of the Camel application. + GetName() string + // GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). + GetKind() string + // GetTraits in used to retrieve the trait configuration. + GetTraits() v1.Traits + // GetIntegrationName return the name of the Integration which has to be imported. + GetIntegrationName() string + // GetIntegrationNameSpace return the namespace of the Integration which has to be imported. + GetIntegrationNameSpace() string + // GetAppObj return the object from which we're importing. + GetAppObj() ctrl.Object +} + +// NonManagedCamelDeployment represents a regular Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelDeployment struct { + deploy *appsv1.Deployment +} + +// GetName returns the name of the Camel application. 
+func (app *NonManagedCamelDeployment) GetName() string { + return app.deploy.GetName() +} + +// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). +func (app *NonManagedCamelDeployment) GetKind() string { + return "Deployment" +} + +// GetTraits in used to retrieve the trait configuration. +func (app *NonManagedCamelDeployment) GetTraits() v1.Traits { + return v1.Traits{ + Container: &trait.ContainerTrait{ + Name: app.getContainerNameFromDeployment(), + }, + } +} + +// GetAppObj return the object from which we're importing. +func (app *NonManagedCamelDeployment) GetAppObj() ctrl.Object { + return app.deploy +} + +// GetIntegrationName return the name of the Integration which has to be imported. +func (app *NonManagedCamelDeployment) GetIntegrationName() string { + return app.deploy.Labels[v1.IntegrationLabel] +} + +// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. +func (app *NonManagedCamelDeployment) GetIntegrationNameSpace() string { + return app.deploy.Namespace +} + +// getContainerNameFromDeployment returns the container name which is running the Camel application. +func (app *NonManagedCamelDeployment) getContainerNameFromDeployment() string { + firstContainerName := "" + for _, ct := range app.deploy.Spec.Template.Spec.Containers { + // set as fallback if no container is named as the deployment + if firstContainerName == "" { + firstContainerName = app.deploy.Name + } + if ct.Name == app.deploy.Name { + return app.deploy.Name + } + } + return firstContainerName +} + +// NonManagedCamelCronjob represents a cron Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelCronjob struct { + cron *batchv1.CronJob +} + +// GetName returns the name of the Camel application. +func (app *NonManagedCamelCronjob) GetName() string { + return app.cron.GetName() +} + +// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). +func (app *NonManagedCamelCronjob) GetKind() string { + return "CronJob" +} + +// GetTraits in used to retrieve the trait configuration. +func (app *NonManagedCamelCronjob) GetTraits() v1.Traits { + return v1.Traits{} +} + +// GetIntegrationName return the name of the Integration which has to be imported. +func (app *NonManagedCamelCronjob) GetIntegrationName() string { + return app.cron.Labels[v1.IntegrationLabel] +} + +// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. +func (app *NonManagedCamelCronjob) GetIntegrationNameSpace() string { + return app.cron.Namespace +} + +// GetAppObj return the object from which we're importing. +func (app *NonManagedCamelCronjob) GetAppObj() ctrl.Object { + return app.cron +} + +// NonManagedCamelKnativeService represents a Knative Service based Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelKnativeService struct { + ksvc *servingv1.Service +} + +// GetName returns the name of the Camel application. +func (app *NonManagedCamelKnativeService) GetName() string { + return app.ksvc.GetName() +} + +// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). +func (app *NonManagedCamelKnativeService) GetKind() string { + return "KnativeService" +} + +// GetTraits in used to retrieve the trait configuration. +func (app *NonManagedCamelKnativeService) GetTraits() v1.Traits { + return v1.Traits{} +} + +// GetIntegrationName return the name of the Integration which has to be imported. 
+func (app *NonManagedCamelKnativeService) GetIntegrationName() string { + return app.ksvc.Labels[v1.IntegrationLabel] +} + +// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. +func (app *NonManagedCamelKnativeService) GetIntegrationNameSpace() string { + return app.ksvc.Namespace +} + +// GetAppObj return the object from which we're importing. +func (app *NonManagedCamelKnativeService) GetAppObj() ctrl.Object { + return app.ksvc +} diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go index 9a6208fcb9..5630f09c75 100644 --- a/pkg/controller/integration/monitor.go +++ b/pkg/controller/integration/monitor.go @@ -28,6 +28,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" @@ -43,6 +44,7 @@ import ( utilResource "github.com/apache/camel-k/v2/pkg/util/resource" ) +// NewMonitorAction is an action used to monitor manager Integrations. func NewMonitorAction() Action { return &monitorAction{} } @@ -58,7 +60,9 @@ func (action *monitorAction) Name() string { func (action *monitorAction) CanHandle(integration *v1.Integration) bool { return integration.Status.Phase == v1.IntegrationPhaseDeploying || integration.Status.Phase == v1.IntegrationPhaseRunning || - integration.Status.Phase == v1.IntegrationPhaseError + integration.Status.Phase == v1.IntegrationPhaseError || + integration.Status.Phase == v1.IntegrationPhaseImportMissing || + integration.Status.Phase == v1.IntegrationPhaseCannotMonitor } func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) { @@ -124,16 +128,51 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra return nil, err } + return action.monitorPods(ctx, environment, integration) +} + +func (action *monitorAction) monitorPods(ctx context.Context, environment *trait.Environment, integration *v1.Integration) (*v1.Integration, error) { + controller, err := action.newController(environment, integration) + if err != nil { + return nil, err + } + if controller.isEmptySelector() { + // This is happening when the Deployment, CronJob, etc resources + // have no selector or labels to identify sibling Pods. + integration.Status.Phase = v1.IntegrationPhaseCannotMonitor + integration.Status.SetConditions( + v1.IntegrationCondition{ + Type: v1.IntegrationConditionMonitoringPodsAvailable, + Status: corev1.ConditionFalse, + Reason: v1.IntegrationConditionMonitoringPodsAvailableReason, + Message: fmt.Sprintf("Could not find any selector for %s. Make sure to include any label in the template and the Pods generated to inherit such label for monitoring purposes.", controller.getControllerName()), + }, + ) + return integration, nil + } + + controllerSelector := controller.getSelector() + selector, err := metav1.LabelSelectorAsSelector(&controllerSelector) + if err != nil { + return nil, err + } + integration.Status.SetConditions( + v1.IntegrationCondition{ + Type: v1.IntegrationConditionMonitoringPodsAvailable, + Status: corev1.ConditionTrue, + Reason: v1.IntegrationConditionMonitoringPodsAvailableReason, + }, + ) // Enforce the scale sub-resource label selector. // It is used by the HPA that queries the scale sub-resource endpoint, // to list the pods owned by the integration. 
- integration.Status.Selector = v1.IntegrationLabel + "=" + integration.Name + integration.Status.Selector = selector.String() // Update the replicas count pendingPods := &corev1.PodList{} err = action.client.List(ctx, pendingPods, ctrl.InNamespace(integration.Namespace), - ctrl.MatchingLabels{v1.IntegrationLabel: integration.Name}, + &ctrl.ListOptions{LabelSelector: selector}, ctrl.MatchingFields{"status.phase": string(corev1.PodPending)}) if err != nil { return nil, err @@ -141,7 +180,7 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra runningPods := &corev1.PodList{} err = action.client.List(ctx, runningPods, ctrl.InNamespace(integration.Namespace), - ctrl.MatchingLabels{v1.IntegrationLabel: integration.Name}, + &ctrl.ListOptions{LabelSelector: selector}, ctrl.MatchingFields{"status.phase": string(corev1.PodRunning)}) if err != nil { return nil, err @@ -161,7 +200,7 @@ func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integra integration.Status.Phase = v1.IntegrationPhaseRunning } if err = action.updateIntegrationPhaseAndReadyCondition( - ctx, environment, integration, pendingPods.Items, runningPods.Items, + ctx, controller, environment, integration, pendingPods.Items, runningPods.Items, ); err != nil { return nil, err } @@ -255,6 +294,9 @@ type controller interface { checkReadyCondition(ctx context.Context) (bool, error) getPodSpec() corev1.PodSpec updateReadyCondition(readyPods int) bool + getSelector() metav1.LabelSelector + isEmptySelector() bool + getControllerName() string } func (action *monitorAction) newController(env *trait.Environment, integration *v1.Integration) (controller, error) { @@ -311,7 +353,7 @@ func getUpdatedController(env *trait.Environment, obj ctrl.Object) ctrl.Object { } func (action *monitorAction) updateIntegrationPhaseAndReadyCondition( - ctx context.Context, environment *trait.Environment, integration *v1.Integration, + ctx context.Context, controller controller, environment *trait.Environment, integration *v1.Integration, pendingPods []corev1.Pod, runningPods []corev1.Pod, ) error { controller, err := action.newController(environment, integration) diff --git a/pkg/controller/integration/monitor_cronjob.go b/pkg/controller/integration/monitor_cronjob.go index 1620a66c31..f5b9a64195 100644 --- a/pkg/controller/integration/monitor_cronjob.go +++ b/pkg/controller/integration/monitor_cronjob.go @@ -23,6 +23,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime/pkg/client" @@ -110,3 +111,18 @@ func (c *cronJobController) updateReadyCondition(readyPods int) bool { return false } + +func (c *cronJobController) getSelector() metav1.LabelSelector { + // We use all the labels which will be transferred to the Pod generated + return metav1.LabelSelector{ + MatchLabels: c.obj.Spec.JobTemplate.Spec.Template.Labels, + } +} + +func (c *cronJobController) isEmptySelector() bool { + return c.obj.Spec.JobTemplate.Spec.Template.Labels == nil +} + +func (c *cronJobController) getControllerName() string { + return fmt.Sprintf("CronJob/%s", c.obj.Name) +} diff --git a/pkg/controller/integration/monitor_deployment.go b/pkg/controller/integration/monitor_deployment.go index e2f823c16f..e3325f8ea8 100644 --- a/pkg/controller/integration/monitor_deployment.go +++ b/pkg/controller/integration/monitor_deployment.go @@ -23,6 +23,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/util/kubernetes" @@ -91,3 +92,15 @@ func (c *deploymentController) updateReadyCondition(readyPods int) bool { return false } + +func (c *deploymentController) getSelector() metav1.LabelSelector { + return *c.obj.Spec.Selector +} + +func (c *deploymentController) isEmptySelector() bool { + return c.obj.Spec.Selector.MatchExpressions == nil && c.obj.Spec.Selector.MatchLabels == nil +} + +func (c *deploymentController) getControllerName() string { + return fmt.Sprintf("Deployment/%s", c.obj.Name) +} diff --git a/pkg/controller/integration/monitor_knative.go b/pkg/controller/integration/monitor_knative.go index 06b7dc82bf..ed614f1a1e 100644 --- a/pkg/controller/integration/monitor_knative.go +++ b/pkg/controller/integration/monitor_knative.go @@ -19,8 +19,10 @@ package integration import ( "context" + "fmt" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" servingv1 "knative.dev/serving/pkg/apis/serving/v1" @@ -63,3 +65,18 @@ func (c *knativeServiceController) updateReadyCondition(readyPods int) bool { return false } + +func (c *knativeServiceController) getSelector() metav1.LabelSelector { + // We use all the labels which will be transferred to the Pod generated + return metav1.LabelSelector{ + MatchLabels: c.obj.Spec.Template.Labels, + } +} + +func (c *knativeServiceController) isEmptySelector() bool { + return c.obj.Spec.Template.Labels == nil +} + +func (c *knativeServiceController) getControllerName() string { + return fmt.Sprintf("KnativeService/%s", c.obj.Name) +} diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go new file mode 100644 index 0000000000..a10a03debe --- /dev/null +++ b/pkg/controller/integration/monitor_synthetic.go @@ -0,0 +1,70 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + "github.com/apache/camel-k/v2/pkg/trait" +) + +// NewMonitorSyntheticAction is an action used to monitor synthetic Integrations. 
+func NewMonitorSyntheticAction() Action { + return &monitorSyntheticAction{} +} + +type monitorSyntheticAction struct { + monitorAction +} + +func (action *monitorSyntheticAction) Name() string { + return "monitor-synthetic" +} + +func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) { + environment, err := trait.NewSyntheticEnvironment(ctx, action.client, integration, nil) + if err != nil { + if k8serrors.IsNotFound(err) { + // Not an error: the resource from which we imported has been deleted, report in it status. + // It may be a temporary situation, for example, if the deployment from which the Integration is imported + // is being redeployed. For this reason we should keep the Integration instead of forcefully removing it. + message := fmt.Sprintf( + "import %s %s no longer available", + integration.Annotations[v1.IntegrationImportedKindLabel], + integration.Annotations[v1.IntegrationImportedNameLabel], + ) + action.L.Info(message) + integration.SetReadyConditionError(message) + zero := int32(0) + integration.Status.Phase = v1.IntegrationPhaseImportMissing + integration.Status.Replicas = &zero + return integration, nil + } + // report the error + integration.Status.Phase = v1.IntegrationPhaseError + integration.SetReadyCondition(corev1.ConditionFalse, v1.IntegrationConditionImportingKindAvailableReason, err.Error()) + return integration, err + } + + return action.monitorPods(ctx, environment, integration) +} diff --git a/pkg/controller/integration/predicate.go b/pkg/controller/integration/predicate.go index 79d61556a9..0feb71fec3 100644 --- a/pkg/controller/integration/predicate.go +++ b/pkg/controller/integration/predicate.go @@ -21,6 +21,7 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/equality" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -55,3 +56,39 @@ func (StatusChangedPredicate) Update(e event.UpdateEvent) bool { return !equality.Semantic.DeepDerivative(s1.Interface(), s2.Interface()) } + +// NonManagedObjectPredicate implements a generic update predicate function for managed object. +type NonManagedObjectPredicate struct { + predicate.Funcs +} + +// Create --. +func (NonManagedObjectPredicate) Create(e event.CreateEvent) bool { + return !isManagedObject(e.Object) +} + +// Update --. +func (NonManagedObjectPredicate) Update(e event.UpdateEvent) bool { + return !isManagedObject(e.ObjectNew) +} + +// Delete --. +func (NonManagedObjectPredicate) Delete(e event.DeleteEvent) bool { + return !isManagedObject(e.Object) +} + +// Generic --. +func (NonManagedObjectPredicate) Generic(e event.GenericEvent) bool { + return !isManagedObject(e.Object) +} + +// isManagedObject returns true if the object is managed by an Integration. 
+func isManagedObject(obj ctrl.Object) bool { + for _, mr := range obj.GetOwnerReferences() { + if mr.APIVersion == "camel.apache.org/v1" && + mr.Kind == "Integration" { + return true + } + } + return false +} diff --git a/pkg/trait/camel.go b/pkg/trait/camel.go index 2a4e7b3f4d..71a24550e4 100644 --- a/pkg/trait/camel.go +++ b/pkg/trait/camel.go @@ -64,7 +64,8 @@ func (t *camelTrait) Configure(e *Environment) (bool, *TraitCondition, error) { t.RuntimeVersion = determineRuntimeVersion(e) } - return true, nil, nil + // Don't run this trait for a synthetic Integration + return e.Integration == nil || !e.Integration.IsSynthetic(), nil, nil } func (t *camelTrait) Apply(e *Environment) error { diff --git a/pkg/trait/platform.go b/pkg/trait/platform.go index 58e5455977..ec3fb1e04f 100644 --- a/pkg/trait/platform.go +++ b/pkg/trait/platform.go @@ -73,7 +73,8 @@ func (t *platformTrait) Configure(e *Environment) (bool, *TraitCondition, error) } } - return true, nil, nil + // Don't run this trait for a synthetic Integration + return e.Integration == nil || !e.Integration.IsSynthetic(), nil, nil } func (t *platformTrait) Apply(e *Environment) error { diff --git a/pkg/trait/trait.go b/pkg/trait/trait.go index 33676616f1..16794ee12d 100644 --- a/pkg/trait/trait.go +++ b/pkg/trait/trait.go @@ -24,13 +24,15 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/client" "github.com/apache/camel-k/v2/pkg/platform" "github.com/apache/camel-k/v2/pkg/util/kubernetes" "github.com/apache/camel-k/v2/pkg/util/log" - k8sclient "sigs.k8s.io/controller-runtime/pkg/client" + serving "knative.dev/serving/pkg/apis/serving/v1" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" ) func Apply(ctx context.Context, c client.Client, integration *v1.Integration, kit *v1.IntegrationKit) (*Environment, error) { @@ -97,7 +99,7 @@ func newEnvironment(ctx context.Context, c client.Client, integration *v1.Integr return nil, errors.New("neither integration nor kit are set") } - var obj k8sclient.Object + var obj ctrl.Object if integration != nil { obj = integration } else if kit != nil { @@ -134,3 +136,68 @@ func newEnvironment(ctx context.Context, c client.Client, integration *v1.Integr return &env, nil } + +// NewSyntheticEnvironment creates an environment suitable for a synthetic Integration. 
+func NewSyntheticEnvironment(ctx context.Context, c client.Client, integration *v1.Integration, kit *v1.IntegrationKit) (*Environment, error) { + if integration == nil && kit == nil { + return nil, errors.New("neither integration nor kit are set") + } + + env := Environment{ + Ctx: ctx, + Platform: nil, + Client: c, + IntegrationKit: kit, + Integration: integration, + ExecutedTraits: make([]Trait, 0), + Resources: kubernetes.NewCollection(), + EnvVars: make([]corev1.EnvVar, 0), + ApplicationProperties: make(map[string]string), + } + + catalog := NewCatalog(c) + // set the catalog + env.Catalog = catalog + // we need to simulate the execution of the traits to fill certain values used later by monitoring + _, err := catalog.apply(&env) + if err != nil { + return nil, fmt.Errorf("error during trait customization: %w", err) + } + camelApp, err := getCamelAppObject( + ctx, + c, + integration.Annotations[v1.IntegrationImportedKindLabel], + integration.Namespace, + integration.Annotations[v1.IntegrationImportedNameLabel], + ) + if err != nil { + return nil, err + } + env.Resources.Add(camelApp) + + return &env, nil +} + +func getCamelAppObject(ctx context.Context, c client.Client, kind, namespace, name string) (ctrl.Object, error) { + switch kind { + case "Deployment": + return c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + case "CronJob": + return c.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + case "KnativeService": + ksvc := &serving.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: serving.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + err := c.Get(ctx, ctrl.ObjectKeyFromObject(ksvc), ksvc) + return ksvc, err + default: + return nil, fmt.Errorf("cannot create a synthetic environment for %s kind", kind) + } +} From 817dc2e04c7d7dd2e0648c0c82b6aa9ce4e6ef82 Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Thu, 30 Nov 2023 10:34:06 +0100 Subject: [PATCH 2/8] chore: require Pods to have integration label --- pkg/cmd/operator/operator.go | 1 + .../integration/integration_controller.go | 28 --------------- pkg/controller/integration/monitor.go | 34 +++++++++++-------- pkg/controller/integration/monitor_cronjob.go | 12 ++----- .../integration/monitor_deployment.go | 9 ++--- pkg/controller/integration/monitor_knative.go | 12 ++----- 6 files changed, 26 insertions(+), 70 deletions(-) diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go index ab59ab638e..04b5ea8b23 100644 --- a/pkg/cmd/operator/operator.go +++ b/pkg/cmd/operator/operator.go @@ -188,6 +188,7 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID selector := labels.NewSelector().Add(*hasIntegrationLabel) selectors := map[ctrl.Object]cache.ByObject{ + &corev1.Pod{}: {Label: selector}, &appsv1.Deployment{}: {Label: selector}, &batchv1.Job{}: {Label: selector}, &servingv1.Service{}: {Label: selector}, diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go index c3dcd30f40..1979b9d4ac 100644 --- a/pkg/controller/integration/integration_controller.go +++ b/pkg/controller/integration/integration_controller.go @@ -27,7 +27,6 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" 
"k8s.io/apimachinery/pkg/types" @@ -458,33 +457,6 @@ func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Buil }), builder.WithPredicates(NonManagedObjectPredicate{}), ). - // We must watch also Revisions, since it's the object that really change when a Knative service scales up and down - Watches(&servingv1.Revision{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { - revision, ok := a.(*servingv1.Revision) - if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve KnativeService Revision") - return []reconcile.Request{} - } - ksvc := &servingv1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: servingv1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: revision.Labels["serving.knative.dev/service"], - Namespace: revision.Namespace, - }, - } - err := c.Get(ctx, ctrl.ObjectKeyFromObject(ksvc), ksvc) - if err != nil { - // The revision does not belong to any managed (owned or imported) KnativeService, just discard - return []reconcile.Request{} - } - return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelKnativeService{ksvc: ksvc}) - }), - builder.WithPredicates(NonManagedObjectPredicate{}), - ). // Watch for the owned CronJobs Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{})) } diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go index 5630f09c75..e2d3b32a35 100644 --- a/pkg/controller/integration/monitor.go +++ b/pkg/controller/integration/monitor.go @@ -28,7 +28,6 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" @@ -136,26 +135,30 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait if err != nil { return nil, err } - if controller.isEmptySelector() { + + // In order to simplify the monitoring and have a minor resource requirement, we will watch only those Pods + // which are labeled with `camel.apache.org/integration`. This is a design choice that requires the user to + // voluntarily add a label to their Pods (via template, possibly) in order to monitor the non managed Camel applications. + + if !controller.hasTemplateIntegrationLabel() { // This is happening when the Deployment, CronJob, etc resources - // have no selector or labels to identify sibling Pods. + // miss the Integration label, required to identify sibling Pods. integration.Status.Phase = v1.IntegrationPhaseCannotMonitor integration.Status.SetConditions( v1.IntegrationCondition{ - Type: v1.IntegrationConditionMonitoringPodsAvailable, - Status: corev1.ConditionFalse, - Reason: v1.IntegrationConditionMonitoringPodsAvailableReason, - Message: fmt.Sprintf("Could not find any selector for %s. Make sure to include any label in the template and the Pods generated to inherit such label for monitoring purposes.", controller.getControllerName()), + Type: v1.IntegrationConditionMonitoringPodsAvailable, + Status: corev1.ConditionFalse, + Reason: v1.IntegrationConditionMonitoringPodsAvailableReason, + Message: fmt.Sprintf( + "Could not find `camel.apache.org/integration: %s` label in the %s template. 
Make sure to include this label in the template for Pod monitoring purposes.", + integration.GetName(), + controller.getControllerName(), + ), }, ) return integration, nil } - controllerSelector := controller.getSelector() - selector, err := metav1.LabelSelectorAsSelector(&controllerSelector) - if err != nil { - return nil, err - } integration.Status.SetConditions( v1.IntegrationCondition{ Type: v1.IntegrationConditionMonitoringPodsAvailable, @@ -166,13 +169,13 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait // Enforce the scale sub-resource label selector. // It is used by the HPA that queries the scale sub-resource endpoint, // to list the pods owned by the integration. - integration.Status.Selector = selector.String() + integration.Status.Selector = v1.IntegrationLabel + "=" + integration.Name // Update the replicas count pendingPods := &corev1.PodList{} err = action.client.List(ctx, pendingPods, ctrl.InNamespace(integration.Namespace), - &ctrl.ListOptions{LabelSelector: selector}, + ctrl.MatchingLabels{v1.IntegrationLabel: integration.Name}, ctrl.MatchingFields{"status.phase": string(corev1.PodPending)}) if err != nil { return nil, err @@ -180,7 +183,7 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait runningPods := &corev1.PodList{} err = action.client.List(ctx, runningPods, ctrl.InNamespace(integration.Namespace), - &ctrl.ListOptions{LabelSelector: selector}, + ctrl.MatchingLabels{v1.IntegrationLabel: integration.Name}, ctrl.MatchingFields{"status.phase": string(corev1.PodRunning)}) if err != nil { return nil, err @@ -296,6 +299,7 @@ type controller interface { updateReadyCondition(readyPods int) bool getSelector() metav1.LabelSelector isEmptySelector() bool + hasTemplateIntegrationLabel() bool getControllerName() string } diff --git a/pkg/controller/integration/monitor_cronjob.go b/pkg/controller/integration/monitor_cronjob.go index f5b9a64195..caa0a67a77 100644 --- a/pkg/controller/integration/monitor_cronjob.go +++ b/pkg/controller/integration/monitor_cronjob.go @@ -23,7 +23,6 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime/pkg/client" @@ -112,15 +111,8 @@ func (c *cronJobController) updateReadyCondition(readyPods int) bool { return false } -func (c *cronJobController) getSelector() metav1.LabelSelector { - // We use all the labels which will be transferred to the Pod generated - return metav1.LabelSelector{ - MatchLabels: c.obj.Spec.JobTemplate.Spec.Template.Labels, - } -} - -func (c *cronJobController) isEmptySelector() bool { - return c.obj.Spec.JobTemplate.Spec.Template.Labels == nil +func (c *cronJobController) hasTemplateIntegrationLabel() bool { + return c.obj.Spec.JobTemplate.Spec.Template.Labels[v1.IntegrationLabel] != "" } func (c *cronJobController) getControllerName() string { diff --git a/pkg/controller/integration/monitor_deployment.go b/pkg/controller/integration/monitor_deployment.go index e3325f8ea8..08b0c35e93 100644 --- a/pkg/controller/integration/monitor_deployment.go +++ b/pkg/controller/integration/monitor_deployment.go @@ -23,7 +23,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/util/kubernetes" @@ -93,12 +92,8 @@ func (c *deploymentController) updateReadyCondition(readyPods int) bool { return false } -func (c 
*deploymentController) getSelector() metav1.LabelSelector { - return *c.obj.Spec.Selector -} - -func (c *deploymentController) isEmptySelector() bool { - return c.obj.Spec.Selector.MatchExpressions == nil && c.obj.Spec.Selector.MatchLabels == nil +func (c *deploymentController) hasTemplateIntegrationLabel() bool { + return c.obj.Spec.Template.Labels[v1.IntegrationLabel] != "" } func (c *deploymentController) getControllerName() string { diff --git a/pkg/controller/integration/monitor_knative.go b/pkg/controller/integration/monitor_knative.go index ed614f1a1e..1c70195987 100644 --- a/pkg/controller/integration/monitor_knative.go +++ b/pkg/controller/integration/monitor_knative.go @@ -22,7 +22,6 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" servingv1 "knative.dev/serving/pkg/apis/serving/v1" @@ -66,15 +65,8 @@ func (c *knativeServiceController) updateReadyCondition(readyPods int) bool { return false } -func (c *knativeServiceController) getSelector() metav1.LabelSelector { - // We use all the labels which will be transferred to the Pod generated - return metav1.LabelSelector{ - MatchLabels: c.obj.Spec.Template.Labels, - } -} - -func (c *knativeServiceController) isEmptySelector() bool { - return c.obj.Spec.Template.Labels == nil +func (c *knativeServiceController) hasTemplateIntegrationLabel() bool { + return c.obj.Spec.Template.Labels[v1.IntegrationLabel] != "" } func (c *knativeServiceController) getControllerName() string { From baae130000dcd84ca47a09e0b3a1bb66d412ca8a Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Fri, 1 Dec 2023 15:48:32 +0100 Subject: [PATCH 3/8] chore: synthetic Integration separate controller --- .../namespaced/operator-role-knative.yaml | 8 - config/rbac/namespaced/operator-role.yaml | 1 + helm/camel-k/templates/operator-role.yaml | 1 + pkg/cmd/operator/operator.go | 3 + .../integration/integration_controller.go | 48 +-- .../integration_controller_import.go | 249 --------------- pkg/controller/integration/monitor.go | 1 - .../integration/monitor_synthetic.go | 18 -- pkg/controller/integration/predicate.go | 37 --- pkg/controller/pipe/pipe_controller.go | 2 +- pkg/controller/synthetic/synthetic.go | 300 ++++++++++++++++++ 11 files changed, 312 insertions(+), 356 deletions(-) delete mode 100644 pkg/controller/integration/integration_controller_import.go create mode 100644 pkg/controller/synthetic/synthetic.go diff --git a/config/rbac/namespaced/operator-role-knative.yaml b/config/rbac/namespaced/operator-role-knative.yaml index 7e1d2f3492..3cba80931b 100644 --- a/config/rbac/namespaced/operator-role-knative.yaml +++ b/config/rbac/namespaced/operator-role-knative.yaml @@ -35,14 +35,6 @@ rules: - patch - update - watch -- apiGroups: - - serving.knative.dev - resources: - - revisions - verbs: - - get - - list - - watch - apiGroups: - eventing.knative.dev resources: diff --git a/config/rbac/namespaced/operator-role.yaml b/config/rbac/namespaced/operator-role.yaml index 4ddc2d4c17..0f364463e4 100644 --- a/config/rbac/namespaced/operator-role.yaml +++ b/config/rbac/namespaced/operator-role.yaml @@ -45,6 +45,7 @@ rules: - camel.apache.org resources: - builds + - integrations verbs: - delete - apiGroups: diff --git a/helm/camel-k/templates/operator-role.yaml b/helm/camel-k/templates/operator-role.yaml index b8e709b80d..40ef9742ac 100644 --- a/helm/camel-k/templates/operator-role.yaml +++ b/helm/camel-k/templates/operator-role.yaml @@ -54,6 +54,7 @@ rules: - camel.apache.org resources: - builds + - integrations verbs: - 
delete - apiGroups: diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go index 04b5ea8b23..12edd7cc11 100644 --- a/pkg/cmd/operator/operator.go +++ b/pkg/cmd/operator/operator.go @@ -59,6 +59,7 @@ import ( v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/client" "github.com/apache/camel-k/v2/pkg/controller" + "github.com/apache/camel-k/v2/pkg/controller/synthetic" "github.com/apache/camel-k/v2/pkg/event" "github.com/apache/camel-k/v2/pkg/install" "github.com/apache/camel-k/v2/pkg/platform" @@ -231,6 +232,8 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID install.OperatorStartupOptionalTools(installCtx, bootstrapClient, watchNamespace, operatorNamespace, log) exitOnError(findOrCreateIntegrationPlatform(installCtx, bootstrapClient, operatorNamespace), "failed to create integration platform") + log.Info("Starting the synthetic Integration manager") + exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache(), mgr.GetAPIReader()), "synthetic Integration manager error") log.Info("Starting the manager") exitOnError(mgr.Start(ctx), "manager exited non-zero") } diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go index 1979b9d4ac..a16aa69672 100644 --- a/pkg/controller/integration/integration_controller.go +++ b/pkg/controller/integration/integration_controller.go @@ -328,7 +328,7 @@ func add(ctx context.Context, mgr manager.Manager, c client.Client, r reconcile. watchIntegrationResources(c, b) // Watch for the CronJob conditionally if ok, err := kubernetes.IsAPIResourceInstalled(c, batchv1.SchemeGroupVersion.String(), reflect.TypeOf(batchv1.CronJob{}).Name()); ok && err == nil { - watchCronJobResources(c, b) + watchCronJobResources(b) } // Watch for the Knative Services conditionally if ok, err := kubernetes.IsAPIResourceInstalled(c, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); err != nil { @@ -405,37 +405,13 @@ func watchIntegrationResources(c client.Client, b *builder.Builder) { }, } })). - // Watch for non managed Deployments (ie, imported) - Watches(&appsv1.Deployment{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { - deploy, ok := a.(*appsv1.Deployment) - if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve Deployment") - return []reconcile.Request{} - } - return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelDeployment{deploy: deploy}) - }), - builder.WithPredicates(NonManagedObjectPredicate{}), - ). // Watch for the owned Deployments Owns(&appsv1.Deployment{}, builder.WithPredicates(StatusChangedPredicate{})) } -func watchCronJobResources(c client.Client, b *builder.Builder) { - // Watch for non managed Deployments (ie, imported) - b.Watches(&batchv1.CronJob{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { - cron, ok := a.(*batchv1.CronJob) - if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve CronJob") - return []reconcile.Request{} - } - return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelCronjob{cron: cron}) - }), - builder.WithPredicates(NonManagedObjectPredicate{}), - ). 
- // Watch for the owned CronJobs - Owns(&batchv1.CronJob{}, builder.WithPredicates(StatusChangedPredicate{})) +func watchCronJobResources(b *builder.Builder) { + // Watch for the owned CronJobs + b.Owns(&batchv1.CronJob{}, builder.WithPredicates(StatusChangedPredicate{})) } func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Builder) error { @@ -445,20 +421,8 @@ func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Buil if ok, err := kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil { return err } else if ok { - // Watch for non managed Knative Service (ie, imported) - b.Watches(&servingv1.Service{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a ctrl.Object) []reconcile.Request { - ksvc, ok := a.(*servingv1.Service) - if !ok { - log.Error(fmt.Errorf("type assertion failed: %v", a), "Failed to retrieve to retrieve KnativeService") - return []reconcile.Request{} - } - return nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx, c, &NonManagedCamelKnativeService{ksvc: ksvc}) - }), - builder.WithPredicates(NonManagedObjectPredicate{}), - ). - // Watch for the owned CronJobs - Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{})) + // Watch for the owned Knative Services + b.Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{})) } return nil } diff --git a/pkg/controller/integration/integration_controller_import.go b/pkg/controller/integration/integration_controller_import.go deleted file mode 100644 index 4031855097..0000000000 --- a/pkg/controller/integration/integration_controller_import.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Licensed to the Apache Software Foundation (ASF) under one or more -contributor license agreements. See the NOTICE file distributed with -this work for additional information regarding copyright ownership. -The ASF licenses this file to You under the Apache License, Version 2.0 -(the "License"); you may not use this file except in compliance with -the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "context" - - v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" - "github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait" - "github.com/apache/camel-k/v2/pkg/client" - "github.com/apache/camel-k/v2/pkg/util/log" - "github.com/apache/camel-k/v2/pkg/util/patch" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - servingv1 "knative.dev/serving/pkg/apis/serving/v1" - ctrl "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// nonManagedCamelAppEnqueueRequestsFromMapFunc represent the function to discover the Integration which has to be woke up: it creates a synthetic -// Integration if the Integration does not exist. This is used to import external Camel applications. 
-func nonManagedCamelAppEnqueueRequestsFromMapFunc(ctx context.Context, c client.Client, adp NonManagedCamelApplicationAdapter) []reconcile.Request { - if adp.GetIntegrationName() == "" { - return []reconcile.Request{} - } - it := v1.NewIntegration(adp.GetIntegrationNameSpace(), adp.GetIntegrationName()) - err := c.Get(ctx, ctrl.ObjectKeyFromObject(&it), &it) - if err != nil { - if k8serrors.IsNotFound(err) { - // We must perform this check to make sure the resource is not being deleted. - // In such case it makes no sense to create an Integration after it. - err := c.Get(ctx, ctrl.ObjectKeyFromObject(adp.GetAppObj()), adp.GetAppObj()) - if err != nil { - if k8serrors.IsNotFound(err) { - return []reconcile.Request{} - } - log.Errorf(err, "Some error happened while trying to get %s %s resource", adp.GetName(), adp.GetKind()) - } - createSyntheticIntegration(&it, adp) - target, err := patch.ApplyPatch(&it) - if err == nil { - err = c.Patch(ctx, target, ctrl.Apply, ctrl.ForceOwnership, ctrl.FieldOwner("camel-k-operator")) - if err != nil { - log.Errorf(err, "Some error happened while creating a synthetic Integration after %s %s resource", adp.GetName(), adp.GetKind()) - return []reconcile.Request{} - } - log.Infof( - "Created a synthetic Integration %s after %s %s", - it.GetName(), - adp.GetName(), - adp.GetKind(), - ) - return []reconcile.Request{ - { - NamespacedName: types.NamespacedName{ - Namespace: it.Namespace, - Name: it.Name, - }, - }, - } - } - if err != nil { - log.Infof("Could not create Integration %s: %s", adp.GetIntegrationName(), err.Error()) - return []reconcile.Request{} - } - } - log.Errorf(err, "Could not get Integration %s", it.GetName()) - return []reconcile.Request{} - } - - return []reconcile.Request{ - { - NamespacedName: types.NamespacedName{ - Namespace: it.Namespace, - Name: it.Name, - }, - }, - } -} - -// createSyntheticIntegration set all required values for a synthetic Integration. -func createSyntheticIntegration(it *v1.Integration, adp NonManagedCamelApplicationAdapter) { - // We need to create a synthetic Integration - it.SetAnnotations(map[string]string{ - v1.IntegrationImportedNameLabel: adp.GetName(), - v1.IntegrationImportedKindLabel: adp.GetKind(), - v1.IntegrationSyntheticLabel: "true", - }) - it.Spec = v1.IntegrationSpec{ - Traits: adp.GetTraits(), - } -} - -// NonManagedCamelApplicationAdapter represents a Camel application built and deployed outside the operator lifecycle. -type NonManagedCamelApplicationAdapter interface { - // GetName returns the name of the Camel application. - GetName() string - // GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). - GetKind() string - // GetTraits in used to retrieve the trait configuration. - GetTraits() v1.Traits - // GetIntegrationName return the name of the Integration which has to be imported. - GetIntegrationName() string - // GetIntegrationNameSpace return the namespace of the Integration which has to be imported. - GetIntegrationNameSpace() string - // GetAppObj return the object from which we're importing. - GetAppObj() ctrl.Object -} - -// NonManagedCamelDeployment represents a regular Camel application built and deployed outside the operator lifecycle. -type NonManagedCamelDeployment struct { - deploy *appsv1.Deployment -} - -// GetName returns the name of the Camel application. -func (app *NonManagedCamelDeployment) GetName() string { - return app.deploy.GetName() -} - -// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). 
-func (app *NonManagedCamelDeployment) GetKind() string { - return "Deployment" -} - -// GetTraits in used to retrieve the trait configuration. -func (app *NonManagedCamelDeployment) GetTraits() v1.Traits { - return v1.Traits{ - Container: &trait.ContainerTrait{ - Name: app.getContainerNameFromDeployment(), - }, - } -} - -// GetAppObj return the object from which we're importing. -func (app *NonManagedCamelDeployment) GetAppObj() ctrl.Object { - return app.deploy -} - -// GetIntegrationName return the name of the Integration which has to be imported. -func (app *NonManagedCamelDeployment) GetIntegrationName() string { - return app.deploy.Labels[v1.IntegrationLabel] -} - -// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. -func (app *NonManagedCamelDeployment) GetIntegrationNameSpace() string { - return app.deploy.Namespace -} - -// getContainerNameFromDeployment returns the container name which is running the Camel application. -func (app *NonManagedCamelDeployment) getContainerNameFromDeployment() string { - firstContainerName := "" - for _, ct := range app.deploy.Spec.Template.Spec.Containers { - // set as fallback if no container is named as the deployment - if firstContainerName == "" { - firstContainerName = app.deploy.Name - } - if ct.Name == app.deploy.Name { - return app.deploy.Name - } - } - return firstContainerName -} - -// NonManagedCamelCronjob represents a cron Camel application built and deployed outside the operator lifecycle. -type NonManagedCamelCronjob struct { - cron *batchv1.CronJob -} - -// GetName returns the name of the Camel application. -func (app *NonManagedCamelCronjob) GetName() string { - return app.cron.GetName() -} - -// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). -func (app *NonManagedCamelCronjob) GetKind() string { - return "CronJob" -} - -// GetTraits in used to retrieve the trait configuration. -func (app *NonManagedCamelCronjob) GetTraits() v1.Traits { - return v1.Traits{} -} - -// GetIntegrationName return the name of the Integration which has to be imported. -func (app *NonManagedCamelCronjob) GetIntegrationName() string { - return app.cron.Labels[v1.IntegrationLabel] -} - -// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. -func (app *NonManagedCamelCronjob) GetIntegrationNameSpace() string { - return app.cron.Namespace -} - -// GetAppObj return the object from which we're importing. -func (app *NonManagedCamelCronjob) GetAppObj() ctrl.Object { - return app.cron -} - -// NonManagedCamelKnativeService represents a Knative Service based Camel application built and deployed outside the operator lifecycle. -type NonManagedCamelKnativeService struct { - ksvc *servingv1.Service -} - -// GetName returns the name of the Camel application. -func (app *NonManagedCamelKnativeService) GetName() string { - return app.ksvc.GetName() -} - -// GetKind returns the kind of the Camel application (ie, Deployment, Cronjob, ...). -func (app *NonManagedCamelKnativeService) GetKind() string { - return "KnativeService" -} - -// GetTraits in used to retrieve the trait configuration. -func (app *NonManagedCamelKnativeService) GetTraits() v1.Traits { - return v1.Traits{} -} - -// GetIntegrationName return the name of the Integration which has to be imported. 
-func (app *NonManagedCamelKnativeService) GetIntegrationName() string { - return app.ksvc.Labels[v1.IntegrationLabel] -} - -// GetIntegrationNameSpace return the namespace of the Integration which has to be imported. -func (app *NonManagedCamelKnativeService) GetIntegrationNameSpace() string { - return app.ksvc.Namespace -} - -// GetAppObj return the object from which we're importing. -func (app *NonManagedCamelKnativeService) GetAppObj() ctrl.Object { - return app.ksvc -} diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go index e2d3b32a35..fb86ed41db 100644 --- a/pkg/controller/integration/monitor.go +++ b/pkg/controller/integration/monitor.go @@ -60,7 +60,6 @@ func (action *monitorAction) CanHandle(integration *v1.Integration) bool { return integration.Status.Phase == v1.IntegrationPhaseDeploying || integration.Status.Phase == v1.IntegrationPhaseRunning || integration.Status.Phase == v1.IntegrationPhaseError || - integration.Status.Phase == v1.IntegrationPhaseImportMissing || integration.Status.Phase == v1.IntegrationPhaseCannotMonitor } diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go index a10a03debe..a1aa86a43d 100644 --- a/pkg/controller/integration/monitor_synthetic.go +++ b/pkg/controller/integration/monitor_synthetic.go @@ -19,10 +19,8 @@ package integration import ( "context" - "fmt" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/trait" @@ -44,22 +42,6 @@ func (action *monitorSyntheticAction) Name() string { func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) { environment, err := trait.NewSyntheticEnvironment(ctx, action.client, integration, nil) if err != nil { - if k8serrors.IsNotFound(err) { - // Not an error: the resource from which we imported has been deleted, report in it status. - // It may be a temporary situation, for example, if the deployment from which the Integration is imported - // is being redeployed. For this reason we should keep the Integration instead of forcefully removing it. - message := fmt.Sprintf( - "import %s %s no longer available", - integration.Annotations[v1.IntegrationImportedKindLabel], - integration.Annotations[v1.IntegrationImportedNameLabel], - ) - action.L.Info(message) - integration.SetReadyConditionError(message) - zero := int32(0) - integration.Status.Phase = v1.IntegrationPhaseImportMissing - integration.Status.Replicas = &zero - return integration, nil - } // report the error integration.Status.Phase = v1.IntegrationPhaseError integration.SetReadyCondition(corev1.ConditionFalse, v1.IntegrationConditionImportingKindAvailableReason, err.Error()) diff --git a/pkg/controller/integration/predicate.go b/pkg/controller/integration/predicate.go index 0feb71fec3..79d61556a9 100644 --- a/pkg/controller/integration/predicate.go +++ b/pkg/controller/integration/predicate.go @@ -21,7 +21,6 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/equality" - ctrl "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -56,39 +55,3 @@ func (StatusChangedPredicate) Update(e event.UpdateEvent) bool { return !equality.Semantic.DeepDerivative(s1.Interface(), s2.Interface()) } - -// NonManagedObjectPredicate implements a generic update predicate function for managed object. 
-type NonManagedObjectPredicate struct { - predicate.Funcs -} - -// Create --. -func (NonManagedObjectPredicate) Create(e event.CreateEvent) bool { - return !isManagedObject(e.Object) -} - -// Update --. -func (NonManagedObjectPredicate) Update(e event.UpdateEvent) bool { - return !isManagedObject(e.ObjectNew) -} - -// Delete --. -func (NonManagedObjectPredicate) Delete(e event.DeleteEvent) bool { - return !isManagedObject(e.Object) -} - -// Generic --. -func (NonManagedObjectPredicate) Generic(e event.GenericEvent) bool { - return !isManagedObject(e.Object) -} - -// isManagedObject returns true if the object is managed by an Integration. -func isManagedObject(obj ctrl.Object) bool { - for _, mr := range obj.GetOwnerReferences() { - if mr.APIVersion == "camel.apache.org/v1" && - mr.Kind == "Integration" { - return true - } - } - return false -} diff --git a/pkg/controller/pipe/pipe_controller.go b/pkg/controller/pipe/pipe_controller.go index 36da7fca1a..5b174e435e 100644 --- a/pkg/controller/pipe/pipe_controller.go +++ b/pkg/controller/pipe/pipe_controller.go @@ -66,7 +66,7 @@ func newReconciler(mgr manager.Manager, c client.Client) reconcile.Reconciler { } func add(mgr manager.Manager, r reconcile.Reconciler) error { - c, err := controller.New("kamelet-binding-controller", mgr, controller.Options{Reconciler: r}) + c, err := controller.New("pipe-controller", mgr, controller.Options{Reconciler: r}) if err != nil { return err } diff --git a/pkg/controller/synthetic/synthetic.go b/pkg/controller/synthetic/synthetic.go new file mode 100644 index 0000000000..bd785d3185 --- /dev/null +++ b/pkg/controller/synthetic/synthetic.go @@ -0,0 +1,300 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synthetic + +import ( + "context" + "fmt" + "reflect" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + "github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait" + "github.com/apache/camel-k/v2/pkg/client" + "github.com/apache/camel-k/v2/pkg/platform" + "github.com/apache/camel-k/v2/pkg/util/kubernetes" + "github.com/apache/camel-k/v2/pkg/util/log" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + clientgocache "k8s.io/client-go/tools/cache" + "knative.dev/serving/pkg/apis/serving" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + "sigs.k8s.io/controller-runtime/pkg/cache" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ManageSyntheticIntegrations is the controller for synthetic Integrations. Consider that the lifecycle of the objects are driven +// by the way we are monitoring them. 
Since we're filtering by the `camel.apache.org/integration` label in the cached client,
+// an add, update or delete event is driven by the presence or absence of that label:
+// when the user labels the resource it is considered an add, and when the label is removed it is considered a delete.
+// We must filter out managed objects (owned by an Integration) in order to avoid conflicting with their reconciliation loop.
+func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cache.Cache, reader ctrl.Reader) error {
+	informers, err := getInformers(ctx, c, cache)
+	if err != nil {
+		return err
+	}
+	for _, informer := range informers {
+		_, err := informer.AddEventHandler(clientgocache.ResourceEventHandlerFuncs{
+			AddFunc: func(obj interface{}) {
+				ctrlObj, ok := obj.(ctrl.Object)
+				if !ok {
+					log.Error(fmt.Errorf("type assertion failed: %v", obj), "Failed to retrieve Object on add event")
+					return
+				}
+				if !isManagedObject(ctrlObj) {
+					integrationName := ctrlObj.GetLabels()[v1.IntegrationLabel]
+					it, err := getSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName)
+					if err != nil {
+						if k8serrors.IsNotFound(err) {
+							adapter, err := nonManagedCamelApplicationFactory(ctrlObj)
+							if err != nil {
+								log.Errorf(err, "Some error happened while creating a Camel application adapter for %s", integrationName)
+							}
+							if err = createSyntheticIntegration(ctx, c, adapter.Integration()); err != nil {
+								log.Errorf(err, "Some error happened while creating a synthetic Integration %s", integrationName)
+							}
+							log.Infof("Created a synthetic Integration %s after %s resource object", it.GetName(), ctrlObj.GetName())
+						} else {
+							log.Errorf(err, "Some error happened while loading a synthetic Integration %s", integrationName)
+						}
+					} else {
+						if it.Status.Phase == v1.IntegrationPhaseImportMissing {
+							// Update with the proper phase (reconciliation will take care of the rest)
+							it.Status.Phase = v1.IntegrationPhaseNone
+							if err = updateSyntheticIntegration(ctx, c, it); err != nil {
+								log.Errorf(err, "Some error happened while updating a synthetic Integration %s", integrationName)
+							}
+						} else {
+							log.Infof("Synthetic Integration %s is in phase %s. Skipping.", integrationName, it.Status.Phase)
+						}
+					}
+				}
+			},
+			DeleteFunc: func(obj interface{}) {
+				ctrlObj, ok := obj.(ctrl.Object)
+				if !ok {
+					log.Error(fmt.Errorf("type assertion failed: %v", obj), "Failed to retrieve Object on delete event")
+					return
+				}
+				if !isManagedObject(ctrlObj) {
+					integrationName := ctrlObj.GetLabels()[v1.IntegrationLabel]
+					// We must use a non-caching client to understand whether the object has been deleted from the cluster or only removed from
+					// the cache (i.e., the user removed the importing label)
+					err := reader.Get(ctx, ctrl.ObjectKeyFromObject(ctrlObj), ctrlObj)
+					if err != nil {
+						if k8serrors.IsNotFound(err) {
+							// Object removed from the cluster
+							it, err := getSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName)
+							if err != nil {
+								log.Errorf(err, "Some error happened while loading a synthetic Integration %s", it.Name)
+								return
+							}
+							// The resource from which we imported has been deleted, report it in its status.
+							// It may be a temporary situation, for example, if the deployment from which the Integration is imported
+							// is being redeployed. For this reason we should keep the Integration instead of forcefully removing it. 
+ message := fmt.Sprintf( + "import %s %s no longer available", + it.Annotations[v1.IntegrationImportedKindLabel], + it.Annotations[v1.IntegrationImportedNameLabel], + ) + it.SetReadyConditionError(message) + zero := int32(0) + it.Status.Phase = v1.IntegrationPhaseImportMissing + it.Status.Replicas = &zero + if err = updateSyntheticIntegration(ctx, c, it); err != nil { + log.Errorf(err, "Some error happened while updating a synthetic Integration %s", it.Name) + } + log.Infof("Updated synthetic Integration %s with status %s", it.GetName(), it.Status.Phase) + } else { + log.Errorf(err, "Some error happened while loading object %s from the cluster", ctrlObj.GetName()) + return + } + } else { + // Importing label removed + if err = deleteSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName); err != nil { + log.Errorf(err, "Some error happened while deleting a synthetic Integration %s", integrationName) + } + log.Infof("Deleted synthetic Integration %s", integrationName) + } + } + }, + }) + if err != nil { + return err + } + } + + return nil +} + +func getInformers(ctx context.Context, cl client.Client, c cache.Cache) ([]cache.Informer, error) { + deploy, err := c.GetInformer(ctx, &appsv1.Deployment{}) + if err != nil { + return nil, err + } + informers := []cache.Informer{deploy} + // Watch for the CronJob conditionally + if ok, err := kubernetes.IsAPIResourceInstalled(cl, batchv1.SchemeGroupVersion.String(), reflect.TypeOf(batchv1.CronJob{}).Name()); ok && err == nil { + cron, err := c.GetInformer(ctx, &batchv1.CronJob{}) + if err != nil { + return nil, err + } + informers = append(informers, cron) + } + // Watch for the Knative Services conditionally + if ok, err := kubernetes.IsAPIResourceInstalled(cl, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); ok && err == nil { + if ok, err := kubernetes.CheckPermission(ctx, cl, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); ok && err == nil { + ksvc, err := c.GetInformer(ctx, &servingv1.Service{}) + if err != nil { + return nil, err + } + informers = append(informers, ksvc) + } + } + + return informers, nil +} + +func getSyntheticIntegration(ctx context.Context, c client.Client, namespace, name string) (*v1.Integration, error) { + it := v1.NewIntegration(namespace, name) + err := c.Get(ctx, ctrl.ObjectKeyFromObject(&it), &it) + return &it, err +} + +func createSyntheticIntegration(ctx context.Context, c client.Client, it *v1.Integration) error { + return c.Create(ctx, it, ctrl.FieldOwner("camel-k-operator")) +} + +func deleteSyntheticIntegration(ctx context.Context, c client.Client, namespace, name string) error { + // As the Integration label was removed, we don't know which is the Synthetic integration to remove + it := v1.NewIntegration(namespace, name) + return c.Delete(ctx, &it) +} + +func updateSyntheticIntegration(ctx context.Context, c client.Client, it *v1.Integration) error { + return c.Status().Update(ctx, it, ctrl.FieldOwner("camel-k-operator")) +} + +// isManagedObject returns true if the object is managed by an Integration. +func isManagedObject(obj ctrl.Object) bool { + for _, mr := range obj.GetOwnerReferences() { + if mr.APIVersion == "camel.apache.org/v1" && + mr.Kind == "Integration" { + return true + } + } + return false +} + +// nonManagedCamelApplicationAdapter represents a Camel application built and deployed outside the operator lifecycle. 
+type nonManagedCamelApplicationAdapter interface { + // Integration return an Integration resource fed by the Camel application adapter. + Integration() *v1.Integration +} + +func nonManagedCamelApplicationFactory(obj ctrl.Object) (nonManagedCamelApplicationAdapter, error) { + deploy, ok := obj.(*appsv1.Deployment) + if ok { + return &nonManagedCamelDeployment{deploy: deploy}, nil + } + cronjob, ok := obj.(*batchv1.CronJob) + if ok { + return &NonManagedCamelCronjob{cron: cronjob}, nil + } + ksvc, ok := obj.(*servingv1.Service) + if ok { + return &NonManagedCamelKnativeService{ksvc: ksvc}, nil + } + return nil, fmt.Errorf("unsupported %s object kind", obj) +} + +// NonManagedCamelDeployment represents a regular Camel application built and deployed outside the operator lifecycle. +type nonManagedCamelDeployment struct { + deploy *appsv1.Deployment +} + +// Integration return an Integration resource fed by the Camel application adapter. +func (app *nonManagedCamelDeployment) Integration() *v1.Integration { + it := v1.NewIntegration(app.deploy.Namespace, app.deploy.Labels[v1.IntegrationLabel]) + it.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: app.deploy.Name, + v1.IntegrationImportedKindLabel: "Deployment", + v1.IntegrationSyntheticLabel: "true", + }) + it.Spec = v1.IntegrationSpec{ + Traits: v1.Traits{ + Container: &trait.ContainerTrait{ + Name: app.getContainerNameFromDeployment(), + }, + }, + } + return &it +} + +// getContainerNameFromDeployment returns the container name which is running the Camel application. +func (app *nonManagedCamelDeployment) getContainerNameFromDeployment() string { + firstContainerName := "" + for _, ct := range app.deploy.Spec.Template.Spec.Containers { + // set as fallback if no container is named as the deployment + if firstContainerName == "" { + firstContainerName = app.deploy.Name + } + if ct.Name == app.deploy.Name { + return app.deploy.Name + } + } + return firstContainerName +} + +// NonManagedCamelCronjob represents a cron Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelCronjob struct { + cron *batchv1.CronJob +} + +// Integration return an Integration resource fed by the Camel application adapter. +func (app *NonManagedCamelCronjob) Integration() *v1.Integration { + it := v1.NewIntegration(app.cron.Namespace, app.cron.Labels[v1.IntegrationLabel]) + it.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: app.cron.Name, + v1.IntegrationImportedKindLabel: "CronJob", + v1.IntegrationSyntheticLabel: "true", + }) + it.Spec = v1.IntegrationSpec{ + Traits: v1.Traits{}, + } + return &it +} + +// NonManagedCamelKnativeService represents a Knative Service based Camel application built and deployed outside the operator lifecycle. +type NonManagedCamelKnativeService struct { + ksvc *servingv1.Service +} + +// Integration return an Integration resource fed by the Camel application adapter. 
+func (app *NonManagedCamelKnativeService) Integration() *v1.Integration {
+	it := v1.NewIntegration(app.ksvc.Namespace, app.ksvc.Labels[v1.IntegrationLabel])
+	it.SetAnnotations(map[string]string{
+		v1.IntegrationImportedNameLabel: app.ksvc.Name,
+		v1.IntegrationImportedKindLabel: "KnativeService",
+		v1.IntegrationSyntheticLabel:    "true",
+	})
+	it.Spec = v1.IntegrationSpec{
+		Traits: v1.Traits{},
+	}
+	return &it
+}

From 9e9bdbfa18fffb8d7983b2fff08a58d759fcec16 Mon Sep 17 00:00:00 2001
From: Pasquale Congiusti
Date: Mon, 4 Dec 2023 15:30:55 +0100
Subject: [PATCH 4/8] chore: synthetic Integration unit testing

---
 pkg/controller/integration/initialize_test.go |  189 +++++
 .../integration/monitor_synthetic_test.go     |  489 ++++++++++++++++++
 pkg/controller/synthetic/synthetic.go         |    4 +-
 pkg/controller/synthetic/synthetic_test.go    |  221 ++++
 pkg/util/test/client.go                       |   15 +-
 5 files changed, 915 insertions(+), 3 deletions(-)
 create mode 100644 pkg/controller/integration/initialize_test.go
 create mode 100644 pkg/controller/integration/monitor_synthetic_test.go
 create mode 100644 pkg/controller/synthetic/synthetic_test.go

diff --git a/pkg/controller/integration/initialize_test.go b/pkg/controller/integration/initialize_test.go
new file mode 100644
index 0000000000..2beaaa798b
--- /dev/null
+++ b/pkg/controller/integration/initialize_test.go
@@ -0,0 +1,189 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. 
+*/ + +package integration + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + + "github.com/apache/camel-k/v2/pkg/util/log" + "github.com/apache/camel-k/v2/pkg/util/test" + + "github.com/stretchr/testify/assert" +) + +func TestCamelImportDeployment(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-deploy", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "Deployment", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseInitialization, + }, + } + c, err := test.NewFakeClient(importedIt) + assert.Nil(t, err) + + a := initializeAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "initialize", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionDeploymentReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "imported from my-deploy Deployment", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) + // Deployment condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionDeploymentAvailable).Status) + assert.Equal(t, v1.IntegrationConditionDeploymentAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionDeploymentAvailable).Reason) + assert.Equal(t, "imported from my-deploy Deployment", handledIt.Status.GetCondition(v1.IntegrationConditionDeploymentAvailable).Message) +} + +func TestCamelImportCronJob(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-cron", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "CronJob", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseInitialization, + }, + } + c, err := test.NewFakeClient(importedIt) + assert.Nil(t, err) + + a := initializeAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "initialize", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionDeploymentReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "imported from my-cron CronJob", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) + // CronJob condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionCronJobAvailable).Status) + assert.Equal(t, v1.IntegrationConditionCronJobCreatedReason, 
handledIt.Status.GetCondition(v1.IntegrationConditionCronJobAvailable).Reason) + assert.Equal(t, "imported from my-cron CronJob", handledIt.Status.GetCondition(v1.IntegrationConditionCronJobAvailable).Message) +} + +func TestCamelImportKnativeService(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-ksvc", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "KnativeService", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseInitialization, + }, + } + c, err := test.NewFakeClient(importedIt) + assert.Nil(t, err) + + a := initializeAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "initialize", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionKnativeServiceReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "imported from my-ksvc KnativeService", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) + // Knative Service condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionKnativeServiceAvailable).Status) + assert.Equal(t, v1.IntegrationConditionKnativeServiceAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionKnativeServiceAvailable).Reason) + assert.Equal(t, "imported from my-ksvc KnativeService", handledIt.Status.GetCondition(v1.IntegrationConditionKnativeServiceAvailable).Message) +} + +func TestCamelImportUnsupportedKind(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-kind", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "SomeKind", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseInitialization, + }, + } + c, err := test.NewFakeClient(importedIt) + assert.Nil(t, err) + + a := initializeAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "initialize", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseError, handledIt.Status.Phase) + // Ready condition + assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionImportingKindAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "Unsupported SomeKind import kind", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) +} diff --git a/pkg/controller/integration/monitor_synthetic_test.go b/pkg/controller/integration/monitor_synthetic_test.go new file mode 100644 index 0000000000..c2217218a2 --- /dev/null +++ b/pkg/controller/integration/monitor_synthetic_test.go @@ -0,0 +1,489 @@ +/* +Licensed 
to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + "github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait" + + "github.com/apache/camel-k/v2/pkg/util/log" + "github.com/apache/camel-k/v2/pkg/util/test" + + "github.com/stretchr/testify/assert" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestMonitorSyntheticIntegrationImportingKindUnavailable(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-deploy", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "SomeKind", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseRunning, + }, + } + c, err := test.NewFakeClient(importedIt) + assert.Nil(t, err) + + a := monitorSyntheticAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "monitor-synthetic", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.NotNil(t, err) + assert.Equal(t, v1.IntegrationPhaseError, handledIt.Status.Phase) + assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionImportingKindAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "cannot create a synthetic environment for SomeKind kind", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) +} + +func TestMonitorSyntheticIntegrationCannotMonitorPods(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-deploy", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "Deployment", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseRunning, + Conditions: []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionDeploymentAvailable, + Status: corev1.ConditionTrue, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + deploy := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: 
"Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-deploy", + Annotations: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + } + c, err := test.NewFakeClient(importedIt, deploy) + assert.Nil(t, err) + + a := monitorSyntheticAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "monitor-synthetic", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseCannotMonitor, handledIt.Status.Phase) + // Ready condition should be still true + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + // Check monitoring pods condition + assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) + assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) + assert.Equal(t, "Could not find `camel.apache.org/integration: my-imported-it` label in the Deployment/my-deploy template. Make sure to include this label in the template for Pod monitoring purposes.", handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Message) +} + +func TestMonitorSyntheticIntegrationDeployment(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-deploy", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "Deployment", + }, + }, + Spec: v1.IntegrationSpec{ + Traits: v1.Traits{ + Container: &trait.ContainerTrait{ + Name: "my-cnt", + }, + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseRunning, + Conditions: []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionDeploymentAvailable, + Status: corev1.ConditionTrue, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + deploy := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-deploy", + Annotations: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + } + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-pod", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + c, err := test.NewFakeClient(importedIt, deploy, pod) + assert.Nil(t, err) + + a := monitorSyntheticAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + 
assert.Equal(t, "monitor-synthetic", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + assert.Equal(t, int32(1), *handledIt.Status.Replicas) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionDeploymentReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "1/1 ready replicas", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) + // Check monitoring pods condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) + assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) +} + +func TestMonitorSyntheticIntegrationCronJob(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-cron", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "CronJob", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseRunning, + Conditions: []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionCronJobAvailable, + Status: corev1.ConditionTrue, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + cron := &batchv1.CronJob{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "CronJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-cron", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + }, + }, + } + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-pod", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + c, err := test.NewFakeClient(importedIt, cron, pod) + assert.Nil(t, err) + + a := monitorSyntheticAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "monitor-synthetic", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + assert.Equal(t, int32(1), *handledIt.Status.Replicas) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + 
assert.Equal(t, v1.IntegrationConditionCronJobCreatedReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "cronjob created", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) + // Check monitoring pods condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) + assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) +} + +func TestMonitorSyntheticIntegrationKnativeService(t *testing.T) { + importedIt := &v1.Integration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: v1.IntegrationKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-imported-it", + Annotations: map[string]string{ + v1.IntegrationImportedNameLabel: "my-ksvc", + v1.IntegrationSyntheticLabel: "true", + v1.IntegrationImportedKindLabel: "KnativeService", + }, + }, + Status: v1.IntegrationStatus{ + Phase: v1.IntegrationPhaseRunning, + Conditions: []v1.IntegrationCondition{ + { + Type: v1.IntegrationConditionKnativeServiceAvailable, + Status: corev1.ConditionTrue, + }, + { + Type: v1.IntegrationConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + ksvc := &servingv1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-ksvc", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: servingv1.ServiceSpec{ + ConfigurationSpec: servingv1.ConfigurationSpec{ + Template: servingv1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: servingv1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + }, + }, + Status: servingv1.ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{ + apis.Condition{ + Type: servingv1.ServiceConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + } + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-pod", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + c, err := test.NewFakeClient(importedIt, ksvc, pod) + assert.Nil(t, err) + + a := monitorSyntheticAction{} + a.InjectLogger(log.Log) + a.InjectClient(c) + assert.Equal(t, "monitor-synthetic", a.Name()) + assert.True(t, a.CanHandle(importedIt)) + handledIt, err := a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Equal(t, v1.IntegrationPhaseRunning, handledIt.Status.Phase) + assert.Equal(t, int32(1), *handledIt.Status.Replicas) + // Ready condition + assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, v1.IntegrationConditionKnativeServiceReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + // Check monitoring pods condition + 
assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) + assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) +} diff --git a/pkg/controller/synthetic/synthetic.go b/pkg/controller/synthetic/synthetic.go index bd785d3185..c5f7bbb342 100644 --- a/pkg/controller/synthetic/synthetic.go +++ b/pkg/controller/synthetic/synthetic.go @@ -220,7 +220,7 @@ func nonManagedCamelApplicationFactory(obj ctrl.Object) (nonManagedCamelApplicat if ok { return &NonManagedCamelKnativeService{ksvc: ksvc}, nil } - return nil, fmt.Errorf("unsupported %s object kind", obj) + return nil, fmt.Errorf("unsupported %s object kind", obj.GetName()) } // NonManagedCamelDeployment represents a regular Camel application built and deployed outside the operator lifecycle. @@ -252,7 +252,7 @@ func (app *nonManagedCamelDeployment) getContainerNameFromDeployment() string { for _, ct := range app.deploy.Spec.Template.Spec.Containers { // set as fallback if no container is named as the deployment if firstContainerName == "" { - firstContainerName = app.deploy.Name + firstContainerName = ct.Name } if ct.Name == app.deploy.Name { return app.deploy.Name diff --git a/pkg/controller/synthetic/synthetic_test.go b/pkg/controller/synthetic/synthetic_test.go new file mode 100644 index 0000000000..c600f6d3e9 --- /dev/null +++ b/pkg/controller/synthetic/synthetic_test.go @@ -0,0 +1,221 @@ +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package synthetic + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + "github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait" + + "github.com/stretchr/testify/assert" +) + +func TestNonManagedUnsupported(t *testing.T) { + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-pod", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + + nilAdapter, err := nonManagedCamelApplicationFactory(pod) + assert.NotNil(t, err) + assert.Equal(t, "unsupported my-pod object kind", err.Error()) + assert.Nil(t, nilAdapter) +} + +func TestNonManagedDeployment(t *testing.T) { + deploy := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-deploy", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + } + + expectedIt := v1.NewIntegration("ns", "my-imported-it") + expectedIt.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: "my-deploy", + v1.IntegrationImportedKindLabel: "Deployment", + v1.IntegrationSyntheticLabel: "true", + }) + expectedIt.Spec = v1.IntegrationSpec{ + Traits: v1.Traits{ + Container: &trait.ContainerTrait{ + Name: "my-cnt", + }, + }, + } + + deploymentAdapter, err := nonManagedCamelApplicationFactory(deploy) + assert.Nil(t, err) + assert.NotNil(t, deploymentAdapter) + assert.Equal(t, expectedIt, *deploymentAdapter.Integration()) +} + +func TestNonManagedCronJob(t *testing.T) { + cron := &batchv1.CronJob{ + TypeMeta: metav1.TypeMeta{ + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "CronJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-cron", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + }, + }, + } + + expectedIt := v1.NewIntegration("ns", "my-imported-it") + expectedIt.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: "my-cron", + v1.IntegrationImportedKindLabel: "CronJob", + v1.IntegrationSyntheticLabel: "true", + }) + + cronJobAdapter, err := nonManagedCamelApplicationFactory(cron) + assert.Nil(t, err) + assert.NotNil(t, cronJobAdapter) + assert.Equal(t, expectedIt, *cronJobAdapter.Integration()) 
+} + +func TestNonManagedKnativeService(t *testing.T) { + ksvc := &servingv1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "my-ksvc", + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: servingv1.ServiceSpec{ + ConfigurationSpec: servingv1.ConfigurationSpec{ + Template: servingv1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.IntegrationLabel: "my-imported-it", + }, + }, + Spec: servingv1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-cnt", + Image: "my-img", + }, + }, + }, + }, + }, + }, + }, + } + + expectedIt := v1.NewIntegration("ns", "my-imported-it") + expectedIt.SetAnnotations(map[string]string{ + v1.IntegrationImportedNameLabel: "my-ksvc", + v1.IntegrationImportedKindLabel: "KnativeService", + v1.IntegrationSyntheticLabel: "true", + }) + + knativeServiceAdapter, err := nonManagedCamelApplicationFactory(ksvc) + assert.Nil(t, err) + assert.NotNil(t, knativeServiceAdapter) + assert.Equal(t, expectedIt, *knativeServiceAdapter.Integration()) +} diff --git a/pkg/util/test/client.go b/pkg/util/test/client.go index fef78d2b62..9105719ebf 100644 --- a/pkg/util/test/client.go +++ b/pkg/util/test/client.go @@ -30,6 +30,7 @@ import ( camelv1alpha1 "github.com/apache/camel-k/v2/pkg/client/camel/clientset/versioned/typed/camel/v1alpha1" "github.com/apache/camel-k/v2/pkg/util" autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -55,7 +56,19 @@ func NewFakeClient(initObjs ...runtime.Object) (client.Client, error) { return nil, err } - c := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjs...).Build() + c := fake. + NewClientBuilder(). + WithScheme(scheme). + WithIndex( + &corev1.Pod{}, + "status.phase", + func(obj controller.Object) []string { + pod, _ := obj.(*corev1.Pod) + return []string{string(pod.Status.Phase)} + }, + ). + WithRuntimeObjects(initObjs...). + Build() camelClientset := fakecamelclientset.NewSimpleClientset(filterObjects(scheme, initObjs, func(gvk schema.GroupVersionKind) bool { return strings.Contains(gvk.Group, "camel") From a3d2c0e56c847768a047228a7c2ca4f8117e2477 Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Tue, 5 Dec 2023 09:39:48 +0100 Subject: [PATCH 5/8] chore(e2e): synthetic integrations --- e2e/common/synthetic/default.go | 26 ++++++ e2e/common/synthetic/files/deploy.yaml | 50 ++++++++++ e2e/common/synthetic/synthetic_test.go | 93 +++++++++++++++++++ e2e/support/test_support.go | 8 ++ .../integration/monitor_synthetic.go | 19 +++- script/Makefile | 1 + 6 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 e2e/common/synthetic/default.go create mode 100644 e2e/common/synthetic/files/deploy.yaml create mode 100644 e2e/common/synthetic/synthetic_test.go diff --git a/e2e/common/synthetic/default.go b/e2e/common/synthetic/default.go new file mode 100644 index 0000000000..a7e504e469 --- /dev/null +++ b/e2e/common/synthetic/default.go @@ -0,0 +1,26 @@ +//go:build integration +// +build integration + +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. 
+The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synthetic + +import "github.com/apache/camel-k/v2/e2e/support" + +var ns = support.GetEnvOrDefault("CAMEL_K_TEST_NAMESPACE", support.GetCIProcessID()) +var operatorID = support.GetEnvOrDefault("CAMEL_K_OPERATOR_ID", support.GetCIProcessID()) diff --git a/e2e/common/synthetic/files/deploy.yaml b/e2e/common/synthetic/files/deploy.yaml new file mode 100644 index 0000000000..89921a04ac --- /dev/null +++ b/e2e/common/synthetic/files/deploy.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: v1 +data: + my-file.txt: hello +kind: ConfigMap +metadata: + name: my-cm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: my-camel-sb-svc + name: my-camel-sb-svc +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: my-camel-sb-svc + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: my-camel-sb-svc + spec: + containers: + - image: docker.io/squakez/my-camel-sb-svc:1.0.0 + imagePullPolicy: IfNotPresent + name: my-camel-sb-svc + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - name: my-cm + mountPath: /tmp/app/data + volumes: + - name: my-cm + configMap: + name: my-cm + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 diff --git a/e2e/common/synthetic/synthetic_test.go b/e2e/common/synthetic/synthetic_test.go new file mode 100644 index 0000000000..f9f1532ce3 --- /dev/null +++ b/e2e/common/synthetic/synthetic_test.go @@ -0,0 +1,93 @@ +//go:build integration +// +build integration + +// To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration" + +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synthetic + +import ( + "testing" + + . "github.com/onsi/gomega" + + . "github.com/apache/camel-k/v2/e2e/support" + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + . 
"github.com/onsi/gomega/gstruct" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func TestSyntheticIntegrationFromDeployment(t *testing.T) { + RegisterTestingT(t) + + // Run the external deployment + ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) + Eventually(DeploymentCondition(ns, "my-camel-sb-svc", appsv1.DeploymentProgressing), TestTimeoutShort). + Should(MatchFields(IgnoreExtras, Fields{ + "Status": Equal(corev1.ConditionTrue), + "Reason": Equal("NewReplicaSetAvailable"), + })) + + // Label the deployment --> Verify the Integration is created (cannot still monitor) + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseCannotMonitor)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + + // Label the deployment template --> Verify the Integration is monitored + ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + one := int32(1) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + + // Delete the deployment --> Verify the Integration is in missing status + ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseImportMissing)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse)) + zero := int32(0) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&zero)) + + // Recreate the deployment and label --> Verify the Integration is monitored + ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + + // Remove label from the deployment --> Verify the Integration is deleted + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration-", "-n", ns)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) + + // Add label back to the deployment --> Verify the Integration is created + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", 
v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + // Scale the deployment --> verify replicas are correctly set + ExpectExecSucceed(t, Kubectl("scale", "deploy", "my-camel-sb-svc", "--replicas", "2", "-n", ns)) + two := int32(2) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&two)) + + // Delete Integration and deployments --> verify no Integration exists any longer + Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed()) + ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) +} diff --git a/e2e/support/test_support.go b/e2e/support/test_support.go index 4b67c6dc65..2743b050cc 100644 --- a/e2e/support/test_support.go +++ b/e2e/support/test_support.go @@ -481,6 +481,14 @@ func MakeWithContext(ctx context.Context, rule string, args ...string) *exec.Cmd return exec.Command("make", args...) } +func Kubectl(args ...string) *exec.Cmd { + return KubectlWithContext(TestContext, args...) +} + +func KubectlWithContext(ctx context.Context, args ...string) *exec.Cmd { + return exec.Command("kubectl", args...) +} + // ============================================================================= // Curried utility functions for testing // ============================================================================= diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go index a1aa86a43d..cee6221632 100644 --- a/pkg/controller/integration/monitor_synthetic.go +++ b/pkg/controller/integration/monitor_synthetic.go @@ -19,11 +19,13 @@ package integration import ( "context" + "fmt" corev1 "k8s.io/api/core/v1" v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" "github.com/apache/camel-k/v2/pkg/trait" + k8serrors "k8s.io/apimachinery/pkg/api/errors" ) // NewMonitorSyntheticAction is an action used to monitor synthetic Integrations. 
@@ -42,7 +44,22 @@ func (action *monitorSyntheticAction) Name() string { func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) { environment, err := trait.NewSyntheticEnvironment(ctx, action.client, integration, nil) if err != nil { - // report the error + // Importing application no longer available + if k8serrors.IsNotFound(err) { + // It could be a normal condition, don't report as an error + integration.Status.Phase = v1.IntegrationPhaseImportMissing + message := fmt.Sprintf( + "import %s %s no longer available", + integration.Annotations[v1.IntegrationImportedKindLabel], + integration.Annotations[v1.IntegrationImportedNameLabel], + ) + integration.SetReadyConditionError(message) + zero := int32(0) + integration.Status.Phase = v1.IntegrationPhaseImportMissing + integration.Status.Replicas = &zero + return integration, nil + } + // other reasons, likely some error to report integration.Status.Phase = v1.IntegrationPhaseError integration.SetReadyCondition(corev1.ConditionFalse, v1.IntegrationConditionImportingKindAvailableReason, err.Error()) return integration, err diff --git a/script/Makefile b/script/Makefile index be5391bd42..282357f8bd 100644 --- a/script/Makefile +++ b/script/Makefile @@ -262,6 +262,7 @@ test-common: do-build go test -timeout 30m -v ./e2e/common/misc -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 60m -v ./e2e/common/traits -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 20m -v ./e2e/common/runtimes -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ + go test -timeout 10m -v ./e2e/common/synthetic -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 10m -v ./e2e/common/support/teardown_test.go -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ exit $${FAILED} From 63e857feb7e4aee016e428b1033b739e78e40dc1 Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Tue, 5 Dec 2023 10:15:21 +0100 Subject: [PATCH 6/8] doc: synthetic Integrations --- config/manager/operator-deployment.yaml | 3 + docs/modules/ROOT/nav.adoc | 3 +- docs/modules/ROOT/pages/running/import.adoc | 44 ++++++++ e2e/common/synthetic/default.go | 26 ----- e2e/common/synthetic/synthetic_test.go | 93 ---------------- .../files/deploy.yaml | 0 e2e/commonwithcustominstall/synthetic_test.go | 100 ++++++++++++++++++ pkg/cmd/operator/operator.go | 9 +- script/Makefile | 1 - 9 files changed, 156 insertions(+), 123 deletions(-) create mode 100644 docs/modules/ROOT/pages/running/import.adoc delete mode 100644 e2e/common/synthetic/default.go delete mode 100644 e2e/common/synthetic/synthetic_test.go rename e2e/{common/synthetic => commonwithcustominstall}/files/deploy.yaml (100%) create mode 100644 e2e/commonwithcustominstall/synthetic_test.go diff --git a/config/manager/operator-deployment.yaml b/config/manager/operator-deployment.yaml index ae47c6e297..f44e150d53 100644 --- a/config/manager/operator-deployment.yaml +++ b/config/manager/operator-deployment.yaml @@ -72,6 +72,9 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + # Change to true to be able to create synthetic Integrations + - name: CAMEL_K_SYNTHETIC_INTEGRATIONS + value: "false" livenessProbe: httpGet: path: /healthz diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 017b36dfc2..20fb04f30a 100644 --- a/docs/modules/ROOT/nav.adoc +++ 
b/docs/modules/ROOT/nav.adoc
@@ -20,8 +20,9 @@
 ** xref:running/dev-mode.adoc[Developer mode]
 ** xref:running/dry-run.adoc[Dry run]
 ** xref:running/runtime-version.adoc[Camel version]
-** xref:running/camel-runtimes.adoc[Camel runtimes]
 ** xref:running/quarkus-native.adoc[Quarkus Native]
+** xref:running/camel-runtimes.adoc[Camel runtimes]
+** xref:running/import.adoc[Import existing Camel apps]
 ** xref:running/run-from-github.adoc[Run from GitHub]
 ** xref:running/promoting.adoc[Promote an Integration]
 ** xref:running/knative-sink.adoc[Knative Sinks]
diff --git a/docs/modules/ROOT/pages/running/import.adoc b/docs/modules/ROOT/pages/running/import.adoc
new file mode 100644
index 0000000000..c09552121e
--- /dev/null
+++ b/docs/modules/ROOT/pages/running/import.adoc
@@ -0,0 +1,44 @@
+= Importing existing Camel applications
+
+You may already have a Camel application running on your cluster, created via a manual deployment, a CI/CD pipeline or any other deployment mechanism you have in place. Since the Camel K operator is meant to operate any Camel application, you will be able to import it and monitor it in a similar fashion to any other Camel K **managed Integration**.
+
+This feature is disabled by default. In order to enable it, you need to run the operator deployment with the environment variable `CAMEL_K_SYNTHETIC_INTEGRATIONS` set to `true` (for instance, at installation time via `kamel install --operator-env-vars CAMEL_K_SYNTHETIC_INTEGRATIONS=true`).
+
+NOTE: you will only be able to monitor the synthetic Integrations. Camel K won't be able to alter the lifecycle of non-managed Integrations (ie, rebuild the original application).
+
+It's important to notice that the operator won't alter any field of the original application, in order to avoid breaking any deployment procedure which is already in place. As it cannot make any assumption on the way the application is built and deployed, it will only be able to **watch** for any changes happening around it.
+
+[[deploy-and-monitor]]
+== Deploy externally, monitor via Camel K Operator
+
+An imported Integration is known as a **synthetic Integration**. You can import any Camel application deployed as a **Deployment**, **CronJob** or **Knative Service**. This behavior is controlled via a label (`camel.apache.org/integration`) that the user needs to apply on the Camel application (either manually or by introducing it in the deployment process, ie, via CI/CD).
+
+NOTE: the example below works in a similar way for CronJob and Knative Service.
+
+As an example, we show how to import a Camel application which was deployed with the Deployment kind. Let's assume it is called `my-camel-sb-svc`.
+```
+$ kubectl label deploy my-camel-sb-svc camel.apache.org/integration=my-it
+```
+The operator immediately creates a synthetic Integration:
+```
+$ kubectl get it
+NAMESPACE NAME PHASE RUNTIME PROVIDER RUNTIME VERSION KIT REPLICAS
+test-79c385c3-d58e-4c28-826d-b14b6245f908 my-it Cannot Monitor Pods
+```
+You can see it will be in `Cannot Monitor Pods` status phase. This is expected because the way Camel K operator monitor Pods. It requires that the same label applied to the Deployment is inherited by the generated Pods. For this reason, beside labelling the Deployment, we need to add a label in the Deployment template.
+```
+$ kubectl patch deployment my-camel-sb-svc --patch '{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}'
+```
+Also this operator can be performed manually or automated in the deployment procedure. 
We can see now that the operator will be able to monitor accordingly the status of the Pods: +``` +$ kubectl get it +NAMESPACE NAME PHASE RUNTIME PROVIDER RUNTIME VERSION KIT REPLICAS +test-79c385c3-d58e-4c28-826d-b14b6245f908 my-it Running 1 +``` +From now on, you will be able to monitor the status of the synthetic Integration in a similar fashion of what you do with managed Integrations. If, for example, your Deployment will scale up or down, then, you will see this information reflecting accordingly: +``` +$ kubectl scale deployment my-camel-sb-svc --replicas 2 +$ kubectl get it +NAMESPACE NAME PHASE RUNTIME PROVIDER RUNTIME VERSION KIT REPLICAS +test-79c385c3-d58e-4c28-826d-b14b6245f908 my-it Running 2 +``` diff --git a/e2e/common/synthetic/default.go b/e2e/common/synthetic/default.go deleted file mode 100644 index a7e504e469..0000000000 --- a/e2e/common/synthetic/default.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build integration -// +build integration - -/* -Licensed to the Apache Software Foundation (ASF) under one or more -contributor license agreements. See the NOTICE file distributed with -this work for additional information regarding copyright ownership. -The ASF licenses this file to You under the Apache License, Version 2.0 -(the "License"); you may not use this file except in compliance with -the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synthetic - -import "github.com/apache/camel-k/v2/e2e/support" - -var ns = support.GetEnvOrDefault("CAMEL_K_TEST_NAMESPACE", support.GetCIProcessID()) -var operatorID = support.GetEnvOrDefault("CAMEL_K_OPERATOR_ID", support.GetCIProcessID()) diff --git a/e2e/common/synthetic/synthetic_test.go b/e2e/common/synthetic/synthetic_test.go deleted file mode 100644 index f9f1532ce3..0000000000 --- a/e2e/common/synthetic/synthetic_test.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build integration -// +build integration - -// To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration" - -/* -Licensed to the Apache Software Foundation (ASF) under one or more -contributor license agreements. See the NOTICE file distributed with -this work for additional information regarding copyright ownership. -The ASF licenses this file to You under the Apache License, Version 2.0 -(the "License"); you may not use this file except in compliance with -the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synthetic - -import ( - "testing" - - . "github.com/onsi/gomega" - - . "github.com/apache/camel-k/v2/e2e/support" - v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" - . 
"github.com/onsi/gomega/gstruct" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" -) - -func TestSyntheticIntegrationFromDeployment(t *testing.T) { - RegisterTestingT(t) - - // Run the external deployment - ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) - Eventually(DeploymentCondition(ns, "my-camel-sb-svc", appsv1.DeploymentProgressing), TestTimeoutShort). - Should(MatchFields(IgnoreExtras, Fields{ - "Status": Equal(corev1.ConditionTrue), - "Reason": Equal("NewReplicaSetAvailable"), - })) - - // Label the deployment --> Verify the Integration is created (cannot still monitor) - ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseCannotMonitor)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) - - // Label the deployment template --> Verify the Integration is monitored - ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) - one := int32(1) - Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) - - // Delete the deployment --> Verify the Integration is in missing status - ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseImportMissing)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse)) - zero := int32(0) - Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&zero)) - - // Recreate the deployment and label --> Verify the Integration is monitored - ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) - ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) - ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) - Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) - - // Remove label from the deployment --> Verify the Integration is deleted - ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration-", "-n", ns)) - Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) - - // Add label back to the deployment --> Verify the Integration is created - ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) - Eventually(IntegrationConditionStatus(ns, "my-it", 
v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) - Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) - // Scale the deployment --> verify replicas are correctly set - ExpectExecSucceed(t, Kubectl("scale", "deploy", "my-camel-sb-svc", "--replicas", "2", "-n", ns)) - two := int32(2) - Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&two)) - - // Delete Integration and deployments --> verify no Integration exists any longer - Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed()) - ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) - Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) -} diff --git a/e2e/common/synthetic/files/deploy.yaml b/e2e/commonwithcustominstall/files/deploy.yaml similarity index 100% rename from e2e/common/synthetic/files/deploy.yaml rename to e2e/commonwithcustominstall/files/deploy.yaml diff --git a/e2e/commonwithcustominstall/synthetic_test.go b/e2e/commonwithcustominstall/synthetic_test.go new file mode 100644 index 0000000000..a1b92f40ec --- /dev/null +++ b/e2e/commonwithcustominstall/synthetic_test.go @@ -0,0 +1,100 @@ +//go:build integration +// +build integration + +// To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration" + +/* +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to You under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commonwithcustominstall + +import ( + "testing" + + . "github.com/onsi/gomega" + + . "github.com/apache/camel-k/v2/e2e/support" + v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1" + . "github.com/onsi/gomega/gstruct" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func TestSyntheticIntegrationFromDeployment(t *testing.T) { + RegisterTestingT(t) + WithNewTestNamespace(t, func(ns string) { + // Install Camel K with the synthetic Integration feature variable + operatorID := "camel-k-synthetic-env" + Expect(KamelInstallWithID(operatorID, ns, + "--operator-env-vars", "CAMEL_K_SYNTHETIC_INTEGRATIONS=true", + ).Execute()).To(Succeed()) + + // Run the external deployment + ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) + Eventually(DeploymentCondition(ns, "my-camel-sb-svc", appsv1.DeploymentProgressing), TestTimeoutShort). 
+ Should(MatchFields(IgnoreExtras, Fields{ + "Status": Equal(corev1.ConditionTrue), + "Reason": Equal("NewReplicaSetAvailable"), + })) + + // Label the deployment --> Verify the Integration is created (cannot still monitor) + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseCannotMonitor)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + + // Label the deployment template --> Verify the Integration is monitored + ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + one := int32(1) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + + // Delete the deployment --> Verify the Integration is in missing status + ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseImportMissing)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse)) + zero := int32(0) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&zero)) + + // Recreate the deployment and label --> Verify the Integration is monitored + ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + + // Remove label from the deployment --> Verify the Integration is deleted + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration-", "-n", ns)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) + + // Add label back to the deployment --> Verify the Integration is created + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) + // Scale the deployment --> verify replicas are correctly set + ExpectExecSucceed(t, Kubectl("scale", "deploy", "my-camel-sb-svc", "--replicas", "2", "-n", ns)) + two := int32(2) + Eventually(IntegrationStatusReplicas(ns, "my-it"), 
TestTimeoutShort).Should(Equal(&two)) + + // Delete Integration and deployments --> verify no Integration exists any longer + Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed()) + ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) + }) +} diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go index 12edd7cc11..97996f48c7 100644 --- a/pkg/cmd/operator/operator.go +++ b/pkg/cmd/operator/operator.go @@ -232,8 +232,13 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID install.OperatorStartupOptionalTools(installCtx, bootstrapClient, watchNamespace, operatorNamespace, log) exitOnError(findOrCreateIntegrationPlatform(installCtx, bootstrapClient, operatorNamespace), "failed to create integration platform") - log.Info("Starting the synthetic Integration manager") - exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache(), mgr.GetAPIReader()), "synthetic Integration manager error") + synthEnvVal, synth := os.LookupEnv("CAMEL_K_SYNTHETIC_INTEGRATIONS") + if synth && synthEnvVal == "true" { + log.Info("Starting the synthetic Integration manager") + exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache(), mgr.GetAPIReader()), "synthetic Integration manager error") + } else { + log.Info("Synthetic Integration manager not configured, skipping") + } log.Info("Starting the manager") exitOnError(mgr.Start(ctx), "manager exited non-zero") } diff --git a/script/Makefile b/script/Makefile index 282357f8bd..be5391bd42 100644 --- a/script/Makefile +++ b/script/Makefile @@ -262,7 +262,6 @@ test-common: do-build go test -timeout 30m -v ./e2e/common/misc -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 60m -v ./e2e/common/traits -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 20m -v ./e2e/common/runtimes -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ - go test -timeout 10m -v ./e2e/common/synthetic -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ go test -timeout 10m -v ./e2e/common/support/teardown_test.go -tags=integration $(TEST_INTEGRATION_COMMON_LANG_RUN) $(GOTESTFMT) || FAILED=1; \ exit $${FAILED} From 57ae5cc9ee7b1796d09a8860f1ba74e5f23485eb Mon Sep 17 00:00:00 2001 From: Pasquale Congiusti Date: Wed, 3 Jan 2024 15:26:48 +0100 Subject: [PATCH 7/8] chore: monitor when missing delete event --- pkg/controller/integration/monitor_synthetic.go | 11 +++++++++++ .../integration/monitor_synthetic_test.go | 13 +++++++++++-- pkg/trait/trait.go | 7 ++++++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go index cee6221632..a51758814e 100644 --- a/pkg/controller/integration/monitor_synthetic.go +++ b/pkg/controller/integration/monitor_synthetic.go @@ -65,5 +65,16 @@ func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v return integration, err } + if environment == nil { + // The application which generated the Integration has no longer the importing label. We may have missed the + // delete event, therefore we need to perform the operation here. 
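+			// The deletion below is best-effort: any error returned by the API server (including a
+			// NotFound, if the Integration is already gone) is propagated to the caller.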
+ err := action.client.Delete(ctx, integration) + action.L.Infof("Deleting synthetic Integration %s", integration.Name) + if err != nil { + return integration, err + } + return nil, nil + } + return action.monitorPods(ctx, environment, integration) } diff --git a/pkg/controller/integration/monitor_synthetic_test.go b/pkg/controller/integration/monitor_synthetic_test.go index c2217218a2..aa1f9b2325 100644 --- a/pkg/controller/integration/monitor_synthetic_test.go +++ b/pkg/controller/integration/monitor_synthetic_test.go @@ -110,7 +110,7 @@ func TestMonitorSyntheticIntegrationCannotMonitorPods(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: "ns", Name: "my-deploy", - Annotations: map[string]string{ + Labels: map[string]string{ v1.IntegrationLabel: "my-imported-it", }, }, @@ -178,7 +178,7 @@ func TestMonitorSyntheticIntegrationDeployment(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: "ns", Name: "my-deploy", - Annotations: map[string]string{ + Labels: map[string]string{ v1.IntegrationLabel: "my-imported-it", }, }, @@ -249,6 +249,15 @@ func TestMonitorSyntheticIntegrationDeployment(t *testing.T) { // Check monitoring pods condition assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) + + // Remove label from deployment + deploy.Labels = nil + c, err = test.NewFakeClient(importedIt, deploy) + assert.Nil(t, err) + a.InjectClient(c) + handledIt, err = a.Handle(context.TODO(), importedIt) + assert.Nil(t, err) + assert.Nil(t, handledIt) } func TestMonitorSyntheticIntegrationCronJob(t *testing.T) { diff --git a/pkg/trait/trait.go b/pkg/trait/trait.go index 16794ee12d..059b294f14 100644 --- a/pkg/trait/trait.go +++ b/pkg/trait/trait.go @@ -137,7 +137,8 @@ func newEnvironment(ctx context.Context, c client.Client, integration *v1.Integr return &env, nil } -// NewSyntheticEnvironment creates an environment suitable for a synthetic Integration. +// NewSyntheticEnvironment creates an environment suitable for a synthetic Integration. If the application which generated the synthetic Integration +// has no longer the label, it will return a nil result. func NewSyntheticEnvironment(ctx context.Context, c client.Client, integration *v1.Integration, kit *v1.IntegrationKit) (*Environment, error) { if integration == nil && kit == nil { return nil, errors.New("neither integration nor kit are set") @@ -173,6 +174,10 @@ func NewSyntheticEnvironment(ctx context.Context, c client.Client, integration * if err != nil { return nil, err } + // Verify if the application has still the expected label. If not, return nil. 
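+	// The label value is expected to match the name of the synthetic Integration it was imported into.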
+	if camelApp.GetLabels()[v1.IntegrationLabel] != integration.Name {
+		return nil, nil
+	}
 	env.Resources.Add(camelApp)
 
 	return &env, nil

From 802aea9025d52dc63bcc8ef22265ce821ff8cc83 Mon Sep 17 00:00:00 2001
From: Pasquale Congiusti 
Date: Wed, 3 Jan 2024 16:19:07 +0100
Subject: [PATCH 8/8] chore: synthetic Integration ownership

---
 docs/modules/ROOT/pages/running/import.adoc   |  6 +-
 e2e/commonwithcustominstall/synthetic_test.go | 33 +++++--
 pkg/apis/camel/v1/integration_types.go        |  6 --
 pkg/cmd/operator/operator.go                  |  2 +-
 .../integration/integration_controller.go     | 25 +++--
 pkg/controller/integration/monitor.go         | 19 +---
 .../integration/monitor_synthetic.go          | 15 +--
 .../integration/monitor_synthetic_test.go     | 18 +---
 pkg/controller/synthetic/synthetic.go         | 98 +++++++++----------
 pkg/controller/synthetic/synthetic_test.go    | 36 ++++++-
 10 files changed, 136 insertions(+), 122 deletions(-)

diff --git a/docs/modules/ROOT/pages/running/import.adoc b/docs/modules/ROOT/pages/running/import.adoc
index c09552121e..9fa8099b8f 100644
--- a/docs/modules/ROOT/pages/running/import.adoc
+++ b/docs/modules/ROOT/pages/running/import.adoc
@@ -23,13 +23,13 @@ The operator immediately creates a synthetic Integration:
 ```
 $ kubectl get it
 NAMESPACE NAME PHASE RUNTIME PROVIDER RUNTIME VERSION KIT REPLICAS
-test-79c385c3-d58e-4c28-826d-b14b6245f908 my-it Cannot Monitor Pods
+test-79c385c3-d58e-4c28-826d-b14b6245f908 my-it Running
 ```
-You can see it will be in `Cannot Monitor Pods` status phase. This is expected because the way Camel K operator monitor Pods. It requires that the same label applied to the Deployment is inherited by the generated Pods. For this reason, beside labelling the Deployment, we need to add a label in the Deployment template.
+You can see it will be in `Running` status phase. However, checking the conditions, you will see that the Integration cannot yet be fully monitored. This is expected because of the way the Camel K operator monitors Pods: it requires the same label applied to the Deployment to be inherited by the generated Pods. For this reason, besides labelling the Deployment, we need to add a label in the Deployment template.
 ```
 $ kubectl patch deployment my-camel-sb-svc --patch '{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}'
 ```
-Also this operator can be performed manually or automated in the deployment procedure. We can see now that the operator will be able to monitor accordingly the status of the Pods:
+Also this operation can be performed manually or automated in the deployment procedure. 
We can see now that the operator will be able to monitor accordingly the status of the Pods: ``` $ kubectl get it NAMESPACE NAME PHASE RUNTIME PROVIDER RUNTIME VERSION KIT REPLICAS diff --git a/e2e/commonwithcustominstall/synthetic_test.go b/e2e/commonwithcustominstall/synthetic_test.go index a1b92f40ec..2979d0b398 100644 --- a/e2e/commonwithcustominstall/synthetic_test.go +++ b/e2e/commonwithcustominstall/synthetic_test.go @@ -34,6 +34,26 @@ import ( corev1 "k8s.io/api/core/v1" ) +func TestSyntheticIntegrationOff(t *testing.T) { + RegisterTestingT(t) + WithNewTestNamespace(t, func(ns string) { + // Install Camel K without synthetic Integration feature variable (default) + operatorID := "camel-k-synthetic-env-off" + Expect(KamelInstallWithID(operatorID, ns).Execute()).To(Succeed()) + + // Run the external deployment + ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) + Eventually(DeploymentCondition(ns, "my-camel-sb-svc", appsv1.DeploymentProgressing), TestTimeoutShort). + Should(MatchFields(IgnoreExtras, Fields{ + "Status": Equal(corev1.ConditionTrue), + "Reason": Equal("NewReplicaSetAvailable"), + })) + + // Label the deployment --> Verify the Integration is not created + ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) + }) +} func TestSyntheticIntegrationFromDeployment(t *testing.T) { RegisterTestingT(t) WithNewTestNamespace(t, func(ns string) { @@ -53,8 +73,10 @@ func TestSyntheticIntegrationFromDeployment(t *testing.T) { // Label the deployment --> Verify the Integration is created (cannot still monitor) ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseCannotMonitor)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue)) + Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning)) + Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse)) + Eventually(IntegrationCondition(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should( + WithTransform(IntegrationConditionReason, Equal(v1.IntegrationConditionMonitoringPodsAvailableReason))) // Label the deployment template --> Verify the Integration is monitored ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns)) @@ -63,12 +85,9 @@ func TestSyntheticIntegrationFromDeployment(t *testing.T) { one := int32(1) Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one)) - // Delete the deployment --> Verify the Integration is in missing status + // Delete the deployment --> Verify the Integration is eventually garbage collected ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns)) - Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseImportMissing)) - Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse)) - zero := int32(0) - Eventually(IntegrationStatusReplicas(ns, "my-it"), 
TestTimeoutShort).Should(Equal(&zero)) + Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil()) // Recreate the deployment and label --> Verify the Integration is monitored ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns)) diff --git a/pkg/apis/camel/v1/integration_types.go b/pkg/apis/camel/v1/integration_types.go index 9bcecaad2f..9f293f6a63 100644 --- a/pkg/apis/camel/v1/integration_types.go +++ b/pkg/apis/camel/v1/integration_types.go @@ -155,10 +155,6 @@ const ( IntegrationPhaseRunning IntegrationPhase = "Running" // IntegrationPhaseError --. IntegrationPhaseError IntegrationPhase = "Error" - // IntegrationPhaseImportMissing used when the application from which the Integration is imported has been deleted. - IntegrationPhaseImportMissing IntegrationPhase = "Application Missing" - // IntegrationPhaseCannotMonitor used when the application from which the Integration has not enough information to monitor its pods. - IntegrationPhaseCannotMonitor IntegrationPhase = "Cannot Monitor Pods" // IntegrationConditionReady --. IntegrationConditionReady IntegrationConditionType = "Ready" @@ -186,8 +182,6 @@ const ( IntegrationConditionProbesAvailable IntegrationConditionType = "ProbesAvailable" // IntegrationConditionTraitInfo --. IntegrationConditionTraitInfo IntegrationConditionType = "TraitInfo" - // IntegrationConditionMonitoringPodsAvailable used to specify that the Pods generated are available for monitoring. - IntegrationConditionMonitoringPodsAvailable IntegrationConditionType = "MonitoringPodsAvailable" // IntegrationConditionKitAvailableReason --. IntegrationConditionKitAvailableReason string = "IntegrationKitAvailable" diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go index 97996f48c7..f4bf99b820 100644 --- a/pkg/cmd/operator/operator.go +++ b/pkg/cmd/operator/operator.go @@ -235,7 +235,7 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID synthEnvVal, synth := os.LookupEnv("CAMEL_K_SYNTHETIC_INTEGRATIONS") if synth && synthEnvVal == "true" { log.Info("Starting the synthetic Integration manager") - exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache(), mgr.GetAPIReader()), "synthetic Integration manager error") + exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache()), "synthetic Integration manager error") } else { log.Info("Synthetic Integration manager not configured, skipping") } diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go index a16aa69672..5129f6ade5 100644 --- a/pkg/controller/integration/integration_controller.go +++ b/pkg/controller/integration/integration_controller.go @@ -415,15 +415,28 @@ func watchCronJobResources(b *builder.Builder) { } func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Builder) error { - // Check for permission to watch the Knative Service resource - checkCtx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - if ok, err := kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil { + // Watch for the owned Knative Services conditionally + if ok, err := kubernetes.IsAPIResourceInstalled(c, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); err != nil { return err } else if ok { - // Watch for the owned Knative Services - b.Owns(&servingv1.Service{}, 
builder.WithPredicates(StatusChangedPredicate{}))
+		// Check for permission to watch the Knative Service resource
+		checkCtx, cancel := context.WithTimeout(ctx, time.Minute)
+		defer cancel()
+		if ok, err = kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil {
+			return err
+		} else if ok {
+			log.Info("KnativeService resources installed in the cluster. RBAC privileges assigned correctly, you can use Knative features.")
+			b.Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{}))
+		} else {
+			log.Info(`KnativeService resources installed in the cluster. However, the Camel K operator does not have the required RBAC privileges, so you can't use Knative features.
+			Make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for Camel K managed Knative Services.`)
+		}
+	} else {
+		log.Info(`KnativeService resources are not installed in the cluster. You can't use Knative features. If you install Knative Serving resources after the
+		Camel K operator, make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for
+		Camel K managed Knative Services.`)
 	}
+
 	return nil
 }
diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go
index fb86ed41db..048136d911 100644
--- a/pkg/controller/integration/monitor.go
+++ b/pkg/controller/integration/monitor.go
@@ -59,8 +59,7 @@ func (action *monitorAction) Name() string {
 func (action *monitorAction) CanHandle(integration *v1.Integration) bool {
 	return integration.Status.Phase == v1.IntegrationPhaseDeploying ||
 		integration.Status.Phase == v1.IntegrationPhaseRunning ||
-		integration.Status.Phase == v1.IntegrationPhaseError ||
-		integration.Status.Phase == v1.IntegrationPhaseCannotMonitor
+		integration.Status.Phase == v1.IntegrationPhaseError
 }
 
 func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) {
@@ -142,10 +141,9 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait
 	if !controller.hasTemplateIntegrationLabel() {
 		// This is happening when the Deployment, CronJob, etc resources
 		// miss the Integration label, required to identify sibling Pods.
-		integration.Status.Phase = v1.IntegrationPhaseCannotMonitor
 		integration.Status.SetConditions(
 			v1.IntegrationCondition{
-				Type: v1.IntegrationConditionMonitoringPodsAvailable,
+				Type: v1.IntegrationConditionReady,
 				Status: corev1.ConditionFalse,
 				Reason: v1.IntegrationConditionMonitoringPodsAvailableReason,
 				Message: fmt.Sprintf(
@@ -158,13 +156,6 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait
 		return integration, nil
 	}
 
-	integration.Status.SetConditions(
-		v1.IntegrationCondition{
-			Type: v1.IntegrationConditionMonitoringPodsAvailable,
-			Status: corev1.ConditionTrue,
-			Reason: v1.IntegrationConditionMonitoringPodsAvailableReason,
-		},
-	)
 	// Enforce the scale sub-resource label selector.
 	// It is used by the HPA that queries the scale sub-resource endpoint,
 	// to list the pods owned by the integration. 
@@ -296,8 +287,6 @@ type controller interface { checkReadyCondition(ctx context.Context) (bool, error) getPodSpec() corev1.PodSpec updateReadyCondition(readyPods int) bool - getSelector() metav1.LabelSelector - isEmptySelector() bool hasTemplateIntegrationLabel() bool getControllerName() string } @@ -359,10 +348,6 @@ func (action *monitorAction) updateIntegrationPhaseAndReadyCondition( ctx context.Context, controller controller, environment *trait.Environment, integration *v1.Integration, pendingPods []corev1.Pod, runningPods []corev1.Pod, ) error { - controller, err := action.newController(environment, integration) - if err != nil { - return err - } if done, err := controller.checkReadyCondition(ctx); done || err != nil { // There may be pods that are not ready but still probable for getting error messages. // Ignore returned error from probing as it's expected when the ctrl obj is not ready. diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go index a51758814e..beb736b1d7 100644 --- a/pkg/controller/integration/monitor_synthetic.go +++ b/pkg/controller/integration/monitor_synthetic.go @@ -19,7 +19,6 @@ package integration import ( "context" - "fmt" corev1 "k8s.io/api/core/v1" @@ -46,18 +45,8 @@ func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v if err != nil { // Importing application no longer available if k8serrors.IsNotFound(err) { - // It could be a normal condition, don't report as an error - integration.Status.Phase = v1.IntegrationPhaseImportMissing - message := fmt.Sprintf( - "import %s %s no longer available", - integration.Annotations[v1.IntegrationImportedKindLabel], - integration.Annotations[v1.IntegrationImportedNameLabel], - ) - integration.SetReadyConditionError(message) - zero := int32(0) - integration.Status.Phase = v1.IntegrationPhaseImportMissing - integration.Status.Replicas = &zero - return integration, nil + // Application was deleted. The GC will take care of + return nil, nil } // other reasons, likely some error to report integration.Status.Phase = v1.IntegrationPhaseError diff --git a/pkg/controller/integration/monitor_synthetic_test.go b/pkg/controller/integration/monitor_synthetic_test.go index aa1f9b2325..b1cf8a66c2 100644 --- a/pkg/controller/integration/monitor_synthetic_test.go +++ b/pkg/controller/integration/monitor_synthetic_test.go @@ -125,13 +125,10 @@ func TestMonitorSyntheticIntegrationCannotMonitorPods(t *testing.T) { assert.True(t, a.CanHandle(importedIt)) handledIt, err := a.Handle(context.TODO(), importedIt) assert.Nil(t, err) - assert.Equal(t, v1.IntegrationPhaseCannotMonitor, handledIt.Status.Phase) - // Ready condition should be still true - assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) + assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) // Check monitoring pods condition - assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) - assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) - assert.Equal(t, "Could not find `camel.apache.org/integration: my-imported-it` label in the Deployment/my-deploy template. 
Make sure to include this label in the template for Pod monitoring purposes.", handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Message) + assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) + assert.Equal(t, "Could not find `camel.apache.org/integration: my-imported-it` label in the Deployment/my-deploy template. Make sure to include this label in the template for Pod monitoring purposes.", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) } func TestMonitorSyntheticIntegrationDeployment(t *testing.T) { @@ -246,9 +243,6 @@ func TestMonitorSyntheticIntegrationDeployment(t *testing.T) { assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) assert.Equal(t, v1.IntegrationConditionDeploymentReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) assert.Equal(t, "1/1 ready replicas", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) - // Check monitoring pods condition - assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) - assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) // Remove label from deployment deploy.Labels = nil @@ -369,9 +363,6 @@ func TestMonitorSyntheticIntegrationCronJob(t *testing.T) { assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) assert.Equal(t, v1.IntegrationConditionCronJobCreatedReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) assert.Equal(t, "cronjob created", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message) - // Check monitoring pods condition - assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) - assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) } func TestMonitorSyntheticIntegrationKnativeService(t *testing.T) { @@ -492,7 +483,4 @@ func TestMonitorSyntheticIntegrationKnativeService(t *testing.T) { // Ready condition assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status) assert.Equal(t, v1.IntegrationConditionKnativeServiceReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason) - // Check monitoring pods condition - assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status) - assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason) } diff --git a/pkg/controller/synthetic/synthetic.go b/pkg/controller/synthetic/synthetic.go index c5f7bbb342..974a2eb05f 100644 --- a/pkg/controller/synthetic/synthetic.go +++ b/pkg/controller/synthetic/synthetic.go @@ -31,6 +31,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientgocache "k8s.io/client-go/tools/cache" "knative.dev/serving/pkg/apis/serving" servingv1 "knative.dev/serving/pkg/apis/serving/v1" @@ -38,12 +39,17 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" ) +var ( + controller = 
true + blockOwnerDeletion = true +) + // ManageSyntheticIntegrations is the controller for synthetic Integrations. Consider that the lifecycle of the objects are driven -// by the way we are monitoring them. Since we're filtering by `camel.apache.org/integration` label in the cached clinet, +// by the way we are monitoring them. Since we're filtering by `camel.apache.org/integration` label in the cached client, // you must consider an add, update or delete // accordingly, ie, when the user label the resource, then it is considered as an add, when it removes the label, it is considered as a delete. // We must filter only non managed objects in order to avoid to conflict with the reconciliation loop of managed objects (owned by an Integration). -func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cache.Cache, reader ctrl.Reader) error { +func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cache.Cache) error { informers, err := getInformers(ctx, c, cache) if err != nil { return err @@ -73,15 +79,7 @@ func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cac log.Errorf(err, "Some error happened while loading a synthetic Integration %s", integrationName) } } else { - if it.Status.Phase == v1.IntegrationPhaseImportMissing { - // Update with proper phase (reconciliation will take care) - it.Status.Phase = v1.IntegrationPhaseNone - if err = updateSyntheticIntegration(ctx, c, it); err != nil { - log.Errorf(err, "Some error happened while updatinf a synthetic Integration %s", integrationName) - } - } else { - log.Infof("Synthetic Integration %s is in phase %s. Skipping.", integrationName, it.Status.Phase) - } + log.Infof("Synthetic Integration %s is in phase %s. Skipping.", integrationName, it.Status.Phase) } } }, @@ -93,44 +91,11 @@ func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cac } if !isManagedObject(ctrlObj) { integrationName := ctrlObj.GetLabels()[v1.IntegrationLabel] - // We must use a non caching client to understand if the object has been deleted from the cluster or only deleted from - // the cache (ie, user removed the importing label) - err := reader.Get(ctx, ctrl.ObjectKeyFromObject(ctrlObj), ctrlObj) - if err != nil { - if k8serrors.IsNotFound(err) { - // Object removed from the cluster - it, err := getSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName) - if err != nil { - log.Errorf(err, "Some error happened while loading a synthetic Integration %s", it.Name) - return - } - // The resource from which we imported has been deleted, report in it status. - // It may be a temporary situation, for example, if the deployment from which the Integration is imported - // is being redeployed. For this reason we should keep the Integration instead of forcefully removing it. 
- message := fmt.Sprintf( - "import %s %s no longer available", - it.Annotations[v1.IntegrationImportedKindLabel], - it.Annotations[v1.IntegrationImportedNameLabel], - ) - it.SetReadyConditionError(message) - zero := int32(0) - it.Status.Phase = v1.IntegrationPhaseImportMissing - it.Status.Replicas = &zero - if err = updateSyntheticIntegration(ctx, c, it); err != nil { - log.Errorf(err, "Some error happened while updating a synthetic Integration %s", it.Name) - } - log.Infof("Updated synthetic Integration %s with status %s", it.GetName(), it.Status.Phase) - } else { - log.Errorf(err, "Some error happened while loading object %s from the cluster", ctrlObj.GetName()) - return - } - } else { - // Importing label removed - if err = deleteSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName); err != nil { - log.Errorf(err, "Some error happened while deleting a synthetic Integration %s", integrationName) - } - log.Infof("Deleted synthetic Integration %s", integrationName) + // Importing label removed + if err = deleteSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName); err != nil { + log.Errorf(err, "Some error happened while deleting a synthetic Integration %s", integrationName) } + log.Infof("Deleted synthetic Integration %s", integrationName) } }, }) @@ -186,10 +151,6 @@ func deleteSyntheticIntegration(ctx context.Context, c client.Client, namespace, return c.Delete(ctx, &it) } -func updateSyntheticIntegration(ctx context.Context, c client.Client, it *v1.Integration) error { - return c.Status().Update(ctx, it, ctrl.FieldOwner("camel-k-operator")) -} - // isManagedObject returns true if the object is managed by an Integration. func isManagedObject(obj ctrl.Object) bool { for _, mr := range obj.GetOwnerReferences() { @@ -243,6 +204,17 @@ func (app *nonManagedCamelDeployment) Integration() *v1.Integration { }, }, } + references := []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: app.deploy.Name, + UID: app.deploy.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + it.SetOwnerReferences(references) return &it } @@ -277,6 +249,17 @@ func (app *NonManagedCamelCronjob) Integration() *v1.Integration { it.Spec = v1.IntegrationSpec{ Traits: v1.Traits{}, } + references := []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "CronJob", + Name: app.cron.Name, + UID: app.cron.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + it.SetOwnerReferences(references) return &it } @@ -296,5 +279,16 @@ func (app *NonManagedCamelKnativeService) Integration() *v1.Integration { it.Spec = v1.IntegrationSpec{ Traits: v1.Traits{}, } + references := []metav1.OwnerReference{ + { + APIVersion: servingv1.SchemeGroupVersion.String(), + Kind: "Service", + Name: app.ksvc.Name, + UID: app.ksvc.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + it.SetOwnerReferences(references) return &it } diff --git a/pkg/controller/synthetic/synthetic_test.go b/pkg/controller/synthetic/synthetic_test.go index c600f6d3e9..fcc15077af 100644 --- a/pkg/controller/synthetic/synthetic_test.go +++ b/pkg/controller/synthetic/synthetic_test.go @@ -115,6 +115,17 @@ func TestNonManagedDeployment(t *testing.T) { }, }, } + references := []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: deploy.Name, + UID: deploy.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + expectedIt.SetOwnerReferences(references) 
deploymentAdapter, err := nonManagedCamelApplicationFactory(deploy) assert.Nil(t, err) @@ -164,7 +175,17 @@ func TestNonManagedCronJob(t *testing.T) { v1.IntegrationImportedKindLabel: "CronJob", v1.IntegrationSyntheticLabel: "true", }) - + references := []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "CronJob", + Name: cron.Name, + UID: cron.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + expectedIt.SetOwnerReferences(references) cronJobAdapter, err := nonManagedCamelApplicationFactory(cron) assert.Nil(t, err) assert.NotNil(t, cronJobAdapter) @@ -174,7 +195,7 @@ func TestNonManagedCronJob(t *testing.T) { func TestNonManagedKnativeService(t *testing.T) { ksvc := &servingv1.Service{ TypeMeta: metav1.TypeMeta{ - APIVersion: appsv1.SchemeGroupVersion.String(), + APIVersion: servingv1.SchemeGroupVersion.String(), Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ @@ -213,6 +234,17 @@ func TestNonManagedKnativeService(t *testing.T) { v1.IntegrationImportedKindLabel: "KnativeService", v1.IntegrationSyntheticLabel: "true", }) + references := []metav1.OwnerReference{ + { + APIVersion: servingv1.SchemeGroupVersion.String(), + Kind: "Service", + Name: ksvc.Name, + UID: ksvc.UID, + Controller: &controller, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + } + expectedIt.SetOwnerReferences(references) knativeServiceAdapter, err := nonManagedCamelApplicationFactory(ksvc) assert.Nil(t, err)
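
Side note (illustration only, not part of the patch): the controller above reacts to the `camel.apache.org/integration` label on non-managed Deployments, CronJobs and Knative Services, creates a synthetic Integration for them, and ties its lifecycle to the source object through the owner reference added in this change. Assuming that behaviour, an external Deployment could be flagged for import with a minimal client-go sketch like the one below; the namespace and Deployment name ("default", "my-camel-app") are made-up examples.

// Minimal sketch: label an existing Deployment so the synthetic controller
// imports it as a synthetic Integration. Names are hypothetical examples.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cli, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Add the label the synthetic controller filters on; the operator should then
	// create a synthetic Integration named after the label value. Deleting the
	// Deployment later lets Kubernetes garbage-collect that Integration through
	// the owner reference set by the controller.
	patch := []byte(`{"metadata":{"labels":{"camel.apache.org/integration":"my-camel-app"}}}`)
	_, err = cli.AppsV1().Deployments("default").Patch(
		context.Background(), "my-camel-app", types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("Deployment labelled; a synthetic Integration 'my-camel-app' should appear")
}

The same effect can be obtained from the command line with `kubectl label deployment my-camel-app camel.apache.org/integration=my-camel-app`; removing the label is treated by the controller as a delete event and the synthetic Integration is removed.
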