diff --git a/Makefile b/Makefile index 78132db..1388418 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ SHELL = /usr/bin/env bash -o pipefail # Define Docker related variables. REGISTRY ?= projectsveltos -IMAGE_NAME ?= k8s-pruner +IMAGE_NAME ?= k8s-cleaner ARCH ?= amd64 OS ?= $(shell uname -s | tr A-Z a-z) K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) @@ -155,7 +155,7 @@ K8S_VERSION := v1.28.0 endif KIND_CONFIG ?= kind-cluster.yaml -CONTROL_CLUSTER_NAME ?= pruner-management +CONTROL_CLUSTER_NAME ?= cleaner-management TIMEOUT ?= 10m NUM_NODES ?= 6 @@ -167,15 +167,15 @@ test: | check-manifests generate fmt vet $(SETUP_ENVTEST) ## Run uts. kind-test: test create-cluster fv ## Build docker image; start kind cluster; load docker image; install all cluster api components and run fv .PHONY: fv -fv: $(KUBECTL) $(GINKGO) ## Run Pruner Controller tests using existing cluster +fv: $(KUBECTL) $(GINKGO) ## Run Cleaner Controller tests using existing cluster cd test/fv; $(GINKGO) -nodes $(NUM_NODES) --label-filter='FV' --v --trace --randomize-all .PHONY: create-cluster create-cluster: $(KIND) $(KUBECTL) $(ENVSUBST) ## Create a new kind cluster designed for development $(MAKE) create-control-cluster - @echo "Start pruner" - $(MAKE) deploy-pruner + @echo "Start cleaner" + $(MAKE) deploy-cleaner .PHONY: delete-cluster delete-cluster: $(KIND) ## Deletes the kind cluster $(CONTROL_CLUSTER_NAME) @@ -187,17 +187,17 @@ create-control-cluster: $(KIND) $(CLUSTERCTL) $(KUBECTL) sed -e "s/K8S_VERSION/$(K8S_VERSION)/g" test/$(KIND_CONFIG) > test/$(KIND_CONFIG).tmp $(KIND) create cluster --name=$(CONTROL_CLUSTER_NAME) --config test/$(KIND_CONFIG).tmp -deploy-pruner: $(KUSTOMIZE) - # Load pruner image into cluster - @echo 'Load pruner image into cluster' +deploy-cleaner: $(KUSTOMIZE) + # Load cleaner image into cluster + @echo 'Load cleaner image into cluster' $(MAKE) load-image - # Install k8s-pruner components - @echo 'Install k8s-pruner components' + # Install k8s-cleaner components + @echo 'Install k8s-cleaner components' cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f- - @echo "Waiting for k8s-pruner to be available..." - $(KUBECTL) wait --for=condition=Available deployment/k8s-pruner-controller -n projectsveltos --timeout=$(TIMEOUT) + @echo "Waiting for k8s-cleaner to be available..." + $(KUBECTL) wait --for=condition=Available deployment/k8s-cleaner-controller -n projectsveltos --timeout=$(TIMEOUT) set-manifest-image: $(info Updating kustomize image patch file for manager resource) @@ -247,10 +247,10 @@ uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specifi $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy -deploy: manifests $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config. +deploy: manifests load-image $(KUSTOMIZE) $(KUBECTL) $(ENVSUBST) ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f - .PHONY: undeploy -undeploy: s $(KUSTOMIZE) ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +undeploy: $(KUSTOMIZE) ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
Call with ignore-not-found=true to ignore resource not found errors during deletion. $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - diff --git a/PROJECT b/PROJECT index 819fbcf..11f8074 100644 --- a/PROJECT +++ b/PROJECT @@ -5,8 +5,8 @@ domain: projectsveltos.io layout: - go.kubebuilder.io/v4 -projectName: k8s-pruner -repo: gianlucam76/k8s-pruner +projectName: k8s-cleaner +repo: gianlucam76/k8s-cleaner resources: - api: crdVersion: v1 @@ -14,7 +14,7 @@ resources: controller: true domain: projectsveltos.io group: apps - kind: Pruner - path: gianlucam76/k8s-pruner/api/v1alpha1 + kind: Cleaner + path: gianlucam76/k8s-cleaner/api/v1alpha1 version: v1alpha1 version: "3" diff --git a/README.md b/README.md index 08f0a6f..e5b5739 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,58 @@ -The Kubernetes controller __Pruner__ efficiently removes or updates stale resources in your cluster. It's designed to handle different types of resources and supports label filtering and Lua-based selection criteria. +The Kubernetes controller __Cleaner__ proactively identifies, removes, or updates stale resources to maintain a clean and efficient Kubernetes environment. It's designed to handle any Kubernetes resource types (including your own custom resources) and provides sophisticated filtering capabilities, including label-based selection and custom Lua-based criteria. -It provides a flexible and customizable approach to identifying and removing or updating outdated resources, helping to maintain a clean and efficient Kubernetes environment. The ability to select resources based on labels and utilize Lua-based selection criteria further enhances its applicability to various scenarios. +## Flexibility and Customization: -## Removing All Secrets +- **Schedule**: Specify the frequency at which the Cleaner should scan the cluster and identify stale resources. Utilize the Cron syntax to define recurring schedules. + +- **DryRun**: Enable safe testing of the Cleaner's filtering logic without affecting actual resource configurations. Resources matching the criteria will be identified, but no changes will be applied. + +- **Label Filtering**: Select resources based on user-defined labels, filtering out unwanted or outdated components. Refine the selection based on label key, operation (equal, different, etc.), and value. + +- **Lua-based Selection Criteria**: Leverage Lua scripting to create complex and dynamic selection criteria, catering to specific resource management needs. Define custom logic to identify and handle stale resources. + +## Maintaining a Clean and Efficient Cluster: + +- **Resource Removal**: Efficiently remove stale resources from your cluster, reclaiming unused resources and improving resource utilization. + +- **Resource Updates**: Update outdated resources to ensure they align with the latest configurations and maintain consistent functionality. + +- **Reduced Resource Bloat**: Minimize resource bloat and maintain a clean and organized cluster, improving overall performance and stability. + +By combining the flexibility of scheduling, the accuracy of label filtering, the power of Lua-based criteria, and the ability to remove or update stale resources, Cleaner empowers users to effectively manage their Kubernetes environments and optimize resource usage. 
+ +## Removing Unwanted Secrets To remove all Secrets from the test namespace every day at 1 AM, use the following YAML configuration: ```yaml apiVersion: apps.projectsveltos.io/v1alpha1 -kind: Pruner +kind: Cleaner metadata: - name: pruner-sample + name: cleaner-sample spec: - schedule: "* 1 * * *" - staleResources: + schedule: "* 1 * * *" # Runs every day at 1 AM + matchingResources: - namespace: test kind: Secret group: "" version: v1 - action: Delete + action: Delete # Deletes matching Secrets ``` +This configuration instructs the Cleaner to scan the test namespace every day at 1 AM, identify all Secrets, and effectively eliminate them, ensuring a clean and organized cluster. + ## Selecting Resources with Label Filters -__Pruner__ can select resources based on their labels. For example, the following configuration removes all Deployment instances in the __test__ namespace that have both __serving=api__ and __environment!=production__ labels: +__Cleaner__ can select resources based on their labels, enabling precise resource management. For instance, to eliminate Deployments in the __test__ namespace with both ``serving=api`` and ``environment!=production`` labels, follow this YAML configuration: ```yaml apiVersion: apps.projectsveltos.io/v1alpha1 -kind: Pruner +kind: Cleaner metadata: - name: pruner-sample1 + name: cleaner-sample1 spec: - schedule: "* 0 * * *" - staleResources: + schedule: "* 0 * * *" # Executes every day at midnight + matchingResources: - namespace: test kind: Deployment group: "apps" @@ -40,27 +60,29 @@ spec: labelFilters: - key: serving operation: Equal - value: api + value: api # Identifies Deployments with "serving" label set to "api" - key: environment operation: Different - value: prouction - action: Delete + value: production # Identifies Deployments with "environment" label different from "production" + action: Delete # Deletes matching Deployments ``` +By utilizing label filters, you can refine the scope of resource management, ensuring that only specific resources are targeted for removal or update. This targeted approach helps maintain a clean and organized Kubernetes environment without affecting unintended resources. + ## Using Lua for Advanced Selection -__Pruner__ allows you to define __Lua__ functions named ``evaluate`` for customized selection criteria. This function receives the resource object as obj. +__Cleaner__ extends its capabilities by enabling the use of __Lua__ scripts for defining advanced selection criteria. These Lua functions, named __evaluate__, receive the resource object as __obj__ and allow for complex and dynamic filtering rules.
-For instance, the following configuration selects all Service instances in the foo namespace that expose port ``443`` or ``8443``: +For example, the following YAML configuration utilizes a Lua script to select all Services in the __foo__ namespace that expose port __443__ or __8443__: ```yaml apiVersion: apps.projectsveltos.io/v1alpha1 -kind: Pruner +kind: Cleaner metadata: - name: pruner-sample2 + name: cleaner-sample2 spec: schedule: "* 0 * * *" - staleResources: + matchingResources: - namespace: foo kind: Service group: "" @@ -68,11 +90,11 @@ spec: evaluate: | function evaluate() hs = {} - hs.matching = false + hs.matching = false -- Initialize matching flag if obj.spec.ports ~= nil then - for _,p in pairs(obj.spec.ports) do - if p.port == 443 or p.port == 8443 then - hs.matching = true + for _,p in pairs(obj.spec.ports) do -- Iterate through the ports + if p.port == 443 or p.port == 8443 then -- Check if port is 443 or 8443 + hs.matching = true -- Set matching flag to true end end end @@ -81,27 +103,31 @@ spec: action: Delete ``` +By leveraging Lua scripts, Cleaner empowers users to define complex and dynamic selection criteria, catering to specific resource management needs. This flexibility enables accurate and targeted identification of stale resources, ensuring effective resource utilization and maintenance of a clean Kubernetes environment. + ## Updating Resources -Besides removing stale resources, __Pruner__ also enables you to update existing resources. This feature empowers you to dynamically modify resource configurations based on specific criteria. For instance, you can replace outdated labels with updated ones, or alter resource settings to align with changing requirements. +Beyond removing stale resources, __Cleaner__ also facilitates the dynamic updating of existing resource configurations. This capability allows you to modify resource specifications based on specific criteria, ensuring alignment with evolving requirements and maintaining resource consistency. Consider the scenario where you want to update Service objects in the foo namespace to use __version2__ apps. -The __evaluate__ function allows you to select resources, Services in the __foo__ namespace pointing to ``version1`` apps. -The __trasnform__ function will change any such a resources, by updating ``obj.spec.selector["app"]`` to ``version2``. + +1. The __evaluate__ function selects the matching resources: Services in the __foo__ namespace pointing to ``version1`` apps. +2. The __transform__ function then updates each matching resource, setting ``obj.spec.selector["app"]`` to ``version2``. ```yaml apiVersion: apps.projectsveltos.io/v1alpha1 -kind: Pruner +kind: Cleaner metadata: - name: pruner-sample3 + name: cleaner-sample3 spec: schedule: "* 0 * * *" - staleResources: + matchingResources: - namespace: foo kind: Service group: "" version: v1 evaluate: | + -- Define how resources will be selected function evaluate() hs = {} hs.matching = false @@ -112,8 +138,9 @@ spec: end return hs end - action: Transform + action: Transform # Update matching resources transform: | + -- Define how resources will be updated function transform() hs = {} obj.spec.selector["app"] = "version2" @@ -121,3 +148,43 @@ spec: return hs end ``` + +## DryRun + +To preview which resources match the __Cleaner__'s criteria, set the __DryRun__ flag to true. The Cleaner will still execute its logic but will not actually delete or update any resources.
To identify matching resources, search the controller logs for the message "resource is a match for cleaner". + +```yaml +apiVersion: apps.projectsveltos.io/v1alpha1 +kind: Cleaner +metadata: + name: cleaner-sample1 +spec: + schedule: "* 0 * * *" # Runs every day at midnight + dryRun: true # Set to true to preview matching resources + matchingResources: + - namespace: test + kind: Deployment + group: "apps" + version: v1 + labelFilters: + - key: serving + operation: Equal + value: api # Match deployments with the "serving" label set to "api" + - key: environment + operation: Different + value: production # Match deployments with the "environment" label different from "production" + action: Delete +``` + +By setting DryRun to true, you can safely test the Cleaner's filtering logic without affecting your actual deployment configurations. Once you're confident in the filtering criteria, you can set DryRun back to false to enable automatic resource deletion. + +## Schedule + +The __schedule__ field specifies when the __Cleaner__ should run its logic to identify and potentially delete or update matching resources. It adheres to the Cron syntax, which is a widely adopted scheduling language for tasks and events. + +The Cron syntax consists of five fields, separated by spaces, each representing a specific part of the scheduling period: minute, hour, day of month, month, and day of week, in that order. + +It also accepts: + +- Standard crontab specs, e.g. "* * * * ?" +- Descriptors, e.g. "@midnight", "@every 1h30m" diff --git a/api/v1alpha1/pruner_types.go b/api/v1alpha1/cleaner_types.go similarity index 82% rename from api/v1alpha1/pruner_types.go rename to api/v1alpha1/cleaner_types.go index 779281e..b30b094 100644 --- a/api/v1alpha1/pruner_types.go +++ b/api/v1alpha1/cleaner_types.go @@ -35,9 +35,9 @@ const ( ) const ( - // PrunerFinalizer allows Reconciler to clean up resources associated with - // Pruner instance before removing it from the apiserver. - PrunerFinalizer = "prunerfinalizer.projectsveltos.io" + // CleanerFinalizer allows Reconciler to clean up resources associated with + // Cleaner instance before removing it from the apiserver. + CleanerFinalizer = "cleanerfinalizer.projectsveltos.io" ) type Resources struct { @@ -82,9 +82,9 @@ type Resources struct { Transform string `json:"transform,omitempty"` } -// PrunerSpec defines the desired state of Pruner -type PrunerSpec struct { - StaleResources []Resources `json:"staleResources"` +// CleanerSpec defines the desired state of Cleaner +type CleanerSpec struct { + MatchingResources []Resources `json:"matchingResources"` // Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
Schedule string `json:"schedule"` @@ -101,8 +101,8 @@ type PrunerSpec struct { DryRun bool `json:"dryRune,omitempty"` } -// PrunerStatus defines the observed state of Pruner -type PrunerStatus struct { +// CleanerStatus defines the observed state of Cleaner +type CleanerStatus struct { // Information when next snapshot is scheduled // +optional NextScheduleTime *metav1.Time `json:"nextScheduleTime,omitempty"` @@ -117,27 +117,27 @@ type PrunerStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:resource:path=pruners,scope=Cluster +//+kubebuilder:resource:path=cleaners,scope=Cluster //+kubebuilder:subresource:status -// Pruner is the Schema for the pruners API -type Pruner struct { +// Cleaner is the Schema for the cleaners API +type Cleaner struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec PrunerSpec `json:"spec,omitempty"` - Status PrunerStatus `json:"status,omitempty"` + Spec CleanerSpec `json:"spec,omitempty"` + Status CleanerStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true -// PrunerList contains a list of Pruner -type PrunerList struct { +// CleanerList contains a list of Cleaner +type CleanerList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []Pruner `json:"items"` + Items []Cleaner `json:"items"` } func init() { - SchemeBuilder.Register(&Pruner{}, &PrunerList{}) + SchemeBuilder.Register(&Cleaner{}, &CleanerList{}) } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 08a9ab7..0b550c3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -21,13 +21,12 @@ limitations under the License. package v1alpha1 import ( - runtime "k8s.io/apimachinery/pkg/runtime" - apiv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pruner) DeepCopyInto(out *Pruner) { +func (in *Cleaner) DeepCopyInto(out *Cleaner) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -35,18 +34,18 @@ func (in *Pruner) DeepCopyInto(out *Pruner) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pruner. -func (in *Pruner) DeepCopy() *Pruner { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cleaner. +func (in *Cleaner) DeepCopy() *Cleaner { if in == nil { return nil } - out := new(Pruner) + out := new(Cleaner) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pruner) DeepCopyObject() runtime.Object { +func (in *Cleaner) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -54,31 +53,31 @@ func (in *Pruner) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrunerList) DeepCopyInto(out *PrunerList) { +func (in *CleanerList) DeepCopyInto(out *CleanerList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Pruner, len(*in)) + *out = make([]Cleaner, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrunerList. -func (in *PrunerList) DeepCopy() *PrunerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanerList. +func (in *CleanerList) DeepCopy() *CleanerList { if in == nil { return nil } - out := new(PrunerList) + out := new(CleanerList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PrunerList) DeepCopyObject() runtime.Object { +func (in *CleanerList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -86,10 +85,10 @@ func (in *PrunerList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrunerSpec) DeepCopyInto(out *PrunerSpec) { +func (in *CleanerSpec) DeepCopyInto(out *CleanerSpec) { *out = *in - if in.StaleResources != nil { - in, out := &in.StaleResources, &out.StaleResources + if in.MatchingResources != nil { + in, out := &in.MatchingResources, &out.MatchingResources *out = make([]Resources, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) @@ -102,18 +101,18 @@ func (in *PrunerSpec) DeepCopyInto(out *PrunerSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrunerSpec. -func (in *PrunerSpec) DeepCopy() *PrunerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanerSpec. +func (in *CleanerSpec) DeepCopy() *CleanerSpec { if in == nil { return nil } - out := new(PrunerSpec) + out := new(CleanerSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrunerStatus) DeepCopyInto(out *PrunerStatus) { +func (in *CleanerStatus) DeepCopyInto(out *CleanerStatus) { *out = *in if in.NextScheduleTime != nil { in, out := &in.NextScheduleTime, &out.NextScheduleTime @@ -130,12 +129,12 @@ func (in *PrunerStatus) DeepCopyInto(out *PrunerStatus) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrunerStatus. -func (in *PrunerStatus) DeepCopy() *PrunerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanerStatus. 
+func (in *CleanerStatus) DeepCopy() *CleanerStatus { if in == nil { return nil } - out := new(PrunerStatus) + out := new(CleanerStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/main.go b/cmd/main.go index 4e1862f..9953aaa 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -40,8 +40,8 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" - "gianlucam76/k8s-pruner/internal/controller" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" + "gianlucam76/k8s-cleaner/internal/controller" //+kubebuilder:scaffold:imports ) @@ -103,11 +103,11 @@ func main() { os.Exit(1) } - if err = (&controller.PrunerReconciler{ + if err = (&controller.CleanerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(ctx, mgr, workers, ctrl.Log.WithName("worker")); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Pruner") + setupLog.Error(err, "unable to create controller", "controller", "Cleaner") os.Exit(1) } //+kubebuilder:scaffold:builder @@ -137,7 +137,7 @@ func initFlags(fs *pflag.FlagSet) { const defaultWorkers = 5 fs.IntVar(&workers, "worker-number", defaultWorkers, - "Number of worker. Workers are used to process pruner instances in backgroun") + "Number of worker. Workers are used to process cleaner instances in backgroun") const defautlRestConfigQPS = 40 fs.Float32Var(&restConfigQPS, "kube-api-qps", defautlRestConfigQPS, diff --git a/config/crd/bases/apps.projectsveltos.io_pruners.yaml b/config/crd/bases/apps.projectsveltos.io_cleaners.yaml similarity index 93% rename from config/crd/bases/apps.projectsveltos.io_pruners.yaml rename to config/crd/bases/apps.projectsveltos.io_cleaners.yaml index 85bf97c..61bd212 100644 --- a/config/crd/bases/apps.projectsveltos.io_pruners.yaml +++ b/config/crd/bases/apps.projectsveltos.io_cleaners.yaml @@ -4,20 +4,20 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.13.0 - name: pruners.apps.projectsveltos.io + name: cleaners.apps.projectsveltos.io spec: group: apps.projectsveltos.io names: - kind: Pruner - listKind: PrunerList - plural: pruners - singular: pruner + kind: Cleaner + listKind: CleanerList + plural: cleaners + singular: cleaner scope: Cluster versions: - name: v1alpha1 schema: openAPIV3Schema: - description: Pruner is the Schema for the pruners API + description: Cleaner is the Schema for the cleaners API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -32,17 +32,14 @@ spec: metadata: type: object spec: - description: PrunerSpec defines the desired state of Pruner + description: CleanerSpec defines the desired state of Cleaner properties: dryRune: default: false description: DryRun if set to true, will have controller delete no resource. All matching resources will be listed in status section type: boolean - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - staleResources: + matchingResources: items: properties: action: @@ -109,6 +106,9 @@ spec: - version type: object type: array + schedule: + description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + type: string startingDeadlineSeconds: description: Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions @@ -116,11 +116,11 @@ spec: format: int64 type: integer required: + - matchingResources - schedule - - staleResources type: object status: - description: PrunerStatus defines the observed state of Pruner + description: CleanerStatus defines the observed state of Cleaner properties: failureMessage: description: FailureMessage provides more information about the error, diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index d19eb8f..40ce97c 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,18 +2,18 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: -- bases/apps.projectsveltos.io_pruners.yaml +- bases/apps.projectsveltos.io_cleaners.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- path: patches/webhook_in_pruners.yaml +#- path: patches/webhook_in_cleaners.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_pruners.yaml +#- path: patches/cainjection_in_cleaners.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_pruners.yaml b/config/crd/patches/cainjection_in_pruners.yaml index 0d54bf4..c2d7f7a 100644 --- a/config/crd/patches/cainjection_in_pruners.yaml +++ b/config/crd/patches/cainjection_in_pruners.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME - name: pruners.apps.projectsveltos.io + name: cleaners.apps.projectsveltos.io diff --git a/config/crd/patches/webhook_in_pruners.yaml b/config/crd/patches/webhook_in_pruners.yaml index 88f63c7..7cb2ed7 100644 --- a/config/crd/patches/webhook_in_pruners.yaml +++ b/config/crd/patches/webhook_in_pruners.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: pruners.apps.projectsveltos.io + name: cleaners.apps.projectsveltos.io spec: conversion: strategy: Webhook diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 6343f1e..accd054 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -6,7 +6,7 @@ namespace: projectsveltos # "wordpress" becomes "alices-wordpress". # Note that it should also match with the prefix (text before '-') of the namespace # field above. -namePrefix: k8s-pruner- +namePrefix: k8s-cleaner- # Labels to add to all resources and selectors. 
#labels: diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index af34f94..e82cda5 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: projectsveltos/k8s-pruner-amd64:main + - image: projectsveltos/k8s-cleaner-amd64:main name: controller diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index c0a57a4..2163f87 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -9,24 +9,24 @@ metadata: name: controller namespace: projectsveltos labels: - control-plane: k8s-pruner + control-plane: k8s-cleaner app.kubernetes.io/name: deployment app.kubernetes.io/instance: controller-manager app.kubernetes.io/component: manager - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize spec: selector: matchLabels: - control-plane: k8s-pruner + control-plane: k8s-cleaner replicas: 1 template: metadata: annotations: kubectl.kubernetes.io/default-container: controller labels: - control-plane: k8s-pruner + control-plane: k8s-cleaner spec: # TODO(user): Uncomment the following code to configure the nodeAffinity expression # according to the platforms which are supported by your solution. diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index ae34b60..dae3b3d 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -4,12 +4,12 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: - control-plane: k8s-pruner + control-plane: k8s-cleaner app.kubernetes.io/name: servicemonitor app.kubernetes.io/instance: controller-manager-metrics-monitor app.kubernetes.io/component: metrics - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: controller-metrics-monitor namespace: projectsveltos @@ -23,4 +23,4 @@ spec: insecureSkipVerify: true selector: matchLabels: - control-plane: k8s-pruner + control-plane: k8s-cleaner diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index 11b497f..b6afb11 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -5,8 +5,8 @@ metadata: app.kubernetes.io/name: clusterrole app.kubernetes.io/instance: metrics-reader app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: metrics-reader rules: diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index 55abeb8..5354a77 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -5,8 +5,8 @@ metadata: app.kubernetes.io/name: clusterrole app.kubernetes.io/instance: proxy-role app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: proxy-role rules: 
diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index 554ce55..f25ecdb 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -5,8 +5,8 @@ metadata: app.kubernetes.io/name: clusterrolebinding app.kubernetes.io/instance: proxy-rolebinding app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: proxy-rolebinding roleRef: diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index dc20ccb..13d37ed 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -2,12 +2,12 @@ apiVersion: v1 kind: Service metadata: labels: - control-plane: k8s-pruner + control-plane: k8s-cleaner app.kubernetes.io/name: service app.kubernetes.io/instance: controller-manager-metrics-service app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: controller-metrics-service namespace: projectsveltos @@ -18,4 +18,4 @@ spec: protocol: TCP targetPort: https selector: - control-plane: k8s-pruner + control-plane: k8s-cleaner diff --git a/config/rbac/pruner_editor_role.yaml b/config/rbac/pruner_editor_role.yaml index 49a61f6..2975404 100644 --- a/config/rbac/pruner_editor_role.yaml +++ b/config/rbac/pruner_editor_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to edit pruners. +# permissions for end users to edit cleaners. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: pruner-editor-role + app.kubernetes.io/instance: cleaner-editor-role app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize - name: pruner-editor-role + name: cleaner-editor-role rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners + - cleaners verbs: - create - delete @@ -26,6 +26,6 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners/status + - cleaners/status verbs: - get diff --git a/config/rbac/pruner_viewer_role.yaml b/config/rbac/pruner_viewer_role.yaml index 5205d55..7dfa0f3 100644 --- a/config/rbac/pruner_viewer_role.yaml +++ b/config/rbac/pruner_viewer_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to view pruners. +# permissions for end users to view cleaners. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: pruner-viewer-role + app.kubernetes.io/instance: cleaner-viewer-role app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize - name: pruner-viewer-role + name: cleaner-viewer-role rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners + - cleaners verbs: - get - list @@ -22,6 +22,6 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners/status + - cleaners/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e9932c5..0297733 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -13,7 +13,7 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners + - cleaners verbs: - create - delete @@ -25,13 +25,13 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners/finalizers + - cleaners/finalizers verbs: - update - apiGroups: - apps.projectsveltos.io resources: - - pruners/status + - cleaners/status verbs: - get - patch diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 68411db..ea65c9b 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -5,8 +5,8 @@ metadata: app.kubernetes.io/name: clusterrolebinding app.kubernetes.io/instance: manager-rolebinding app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: controller-rolebinding roleRef: diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index bb17a3b..52e12d3 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -5,8 +5,8 @@ metadata: app.kubernetes.io/name: serviceaccount app.kubernetes.io/instance: controller-manager-sa app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize name: controller namespace: projectsveltos diff --git a/config/samples/apps_v1alpha1_pruner.yaml b/config/samples/apps_v1alpha1_pruner.yaml index 0ef268a..8b765a2 100644 --- a/config/samples/apps_v1alpha1_pruner.yaml +++ b/config/samples/apps_v1alpha1_pruner.yaml @@ -1,12 +1,12 @@ apiVersion: apps.projectsveltos.io/v1alpha1 -kind: Pruner +kind: Cleaner metadata: labels: - app.kubernetes.io/name: pruner - app.kubernetes.io/instance: pruner-sample - app.kubernetes.io/part-of: k8s-pruner + app.kubernetes.io/name: cleaner + app.kubernetes.io/instance: cleaner-sample + app.kubernetes.io/part-of: k8s-cleaner app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: k8s-pruner - name: pruner-sample + app.kubernetes.io/created-by: k8s-cleaner + name: cleaner-sample spec: # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 65a9d8f..1283d51 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,4 @@ ## Append samples of your project ## resources: -- apps_v1alpha1_pruner.yaml +- apps_v1alpha1_cleaner.yaml 
#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/go.mod b/go.mod index 2ded54e..10c1db0 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module gianlucam76/k8s-pruner +module gianlucam76/k8s-cleaner go 1.20 diff --git a/internal/controller/pruner_controller.go b/internal/controller/cleaner_controller.go similarity index 52% rename from internal/controller/pruner_controller.go rename to internal/controller/cleaner_controller.go index aa83c5d..3f7a582 100644 --- a/internal/controller/pruner_controller.go +++ b/internal/controller/cleaner_controller.go @@ -32,48 +32,48 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" - "gianlucam76/k8s-pruner/internal/controller/executor" - "gianlucam76/k8s-pruner/pkg/scope" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" + "gianlucam76/k8s-cleaner/internal/controller/executor" + "gianlucam76/k8s-cleaner/pkg/scope" "github.com/go-logr/logr" ) -// PrunerReconciler reconciles a Pruner object -type PrunerReconciler struct { +// CleanerReconciler reconciles a Cleaner object +type CleanerReconciler struct { client.Client Scheme *runtime.Scheme } -//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=pruners,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=pruners/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=pruners/finalizers,verbs=update +//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=cleaners,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=cleaners/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps.projectsveltos.io,resources=cleaners/finalizers,verbs=update //+kubebuilder:rbac:groups="*",resources="*",verbs="*" -func (r *PrunerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r *CleanerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { logger := ctrl.LoggerFrom(ctx) logger.Info("Reconciling") - // Fecth the Pruner instance - pruner := &appsv1alpha1.Pruner{} - err := r.Get(ctx, req.NamespacedName, pruner) + // Fecth the Cleaner instance + cleaner := &appsv1alpha1.Cleaner{} + err := r.Get(ctx, req.NamespacedName, cleaner) if err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil } - logger.Error(err, "Failed to fetch Pruner") + logger.Error(err, "Failed to fetch Cleaner") return reconcile.Result{}, errors.Wrapf( err, - "Failed to fetch Pruner %s", + "Failed to fetch Cleaner %s", req.NamespacedName, ) } - prunerScope, err := scope.NewPrunerScope(scope.PrunerScopeParams{ + cleanerScope, err := scope.NewCleanerScope(scope.CleanerScopeParams{ Client: r.Client, Logger: logger, - Pruner: pruner, - ControllerName: "pruner", + Cleaner: cleaner, + ControllerName: "cleaner", }) if err != nil { logger.Error(err, "Failed to create profileScope") @@ -85,61 +85,61 @@ func (r *PrunerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c } // Always close the scope when exiting this function so we can persist any - // Pruner changes. + // Cleaner changes. 
defer func() { - if err := prunerScope.Close(ctx); err != nil { + if err := cleanerScope.Close(ctx); err != nil { reterr = err } }() - logger = logger.WithValues("pruner", pruner.Name) + logger = logger.WithValues("cleaner", cleaner.Name) - if !pruner.DeletionTimestamp.IsZero() { - r.reconcileDelete(prunerScope, logger) + if !cleaner.DeletionTimestamp.IsZero() { + r.reconcileDelete(cleanerScope, logger) return ctrl.Result{}, nil } - return r.reconcileNormal(ctx, prunerScope, logger) + return r.reconcileNormal(ctx, cleanerScope, logger) } -func (r *PrunerReconciler) reconcileDelete(prunerScope *scope.PrunerScope, logger logr.Logger) { +func (r *CleanerReconciler) reconcileDelete(cleanerScope *scope.CleanerScope, logger logr.Logger) { logger.Info("reconcileDelete") - removeQueuedJobs(prunerScope) + removeQueuedJobs(cleanerScope) - if controllerutil.ContainsFinalizer(prunerScope.Pruner, appsv1alpha1.PrunerFinalizer) { - controllerutil.RemoveFinalizer(prunerScope.Pruner, appsv1alpha1.PrunerFinalizer) + if controllerutil.ContainsFinalizer(cleanerScope.Cleaner, appsv1alpha1.CleanerFinalizer) { + controllerutil.RemoveFinalizer(cleanerScope.Cleaner, appsv1alpha1.CleanerFinalizer) } logger.Info("reconcileDelete succeeded") } -func (r *PrunerReconciler) reconcileNormal(ctx context.Context, prunerScope *scope.PrunerScope, +func (r *CleanerReconciler) reconcileNormal(ctx context.Context, cleanerScope *scope.CleanerScope, logger logr.Logger) (reconcile.Result, error) { logger.Info("reconcileSnapshotNormal") - if err := r.addFinalizer(ctx, prunerScope.Pruner, appsv1alpha1.PrunerFinalizer); err != nil { + if err := r.addFinalizer(ctx, cleanerScope.Cleaner, appsv1alpha1.CleanerFinalizer); err != nil { logger.Info(fmt.Sprintf("failed to add finalizer: %s", err)) return reconcile.Result{}, err } executorClient := executor.GetClient() - result := executorClient.GetResult(prunerScope.Pruner.Name) + result := executorClient.GetResult(cleanerScope.Cleaner.Name) if result.ResultStatus != executor.Unavailable { if result.Err != nil { msg := result.Err.Error() - prunerScope.SetFailureMessage(&msg) + cleanerScope.SetFailureMessage(&msg) } else { - prunerScope.SetFailureMessage(nil) + cleanerScope.SetFailureMessage(nil) } } now := time.Now() - nextRun, err := schedule(ctx, prunerScope, logger) + nextRun, err := schedule(ctx, cleanerScope, logger) if err != nil { logger.Info("failed to get next run. Err: %v", err) msg := err.Error() - prunerScope.SetFailureMessage(&msg) + cleanerScope.SetFailureMessage(&msg) return ctrl.Result{}, err } @@ -149,77 +149,77 @@ func (r *PrunerReconciler) reconcileNormal(ctx context.Context, prunerScope *sco } // SetupWithManager sets up the controller with the Manager. -func (r *PrunerReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, +func (r *CleanerReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, numOfWorker int, logger logr.Logger) error { executor.InitializeClient(ctx, logger, mgr.GetConfig(), mgr.GetClient(), numOfWorker) return ctrl.NewControllerManagedBy(mgr). - For(&appsv1alpha1.Pruner{}). + For(&appsv1alpha1.Cleaner{}). 
Complete(r) } -func (r *PrunerReconciler) addFinalizer(ctx context.Context, pruner *appsv1alpha1.Pruner, finalizer string) error { - if controllerutil.ContainsFinalizer(pruner, finalizer) { +func (r *CleanerReconciler) addFinalizer(ctx context.Context, cleaner *appsv1alpha1.Cleaner, finalizer string) error { + if controllerutil.ContainsFinalizer(cleaner, finalizer) { return nil } - controllerutil.AddFinalizer(pruner, finalizer) + controllerutil.AddFinalizer(cleaner, finalizer) - if err := r.Client.Update(ctx, pruner); err != nil { + if err := r.Client.Update(ctx, cleaner); err != nil { return err } - return r.Get(ctx, types.NamespacedName{Name: pruner.Name}, pruner) + return r.Get(ctx, types.NamespacedName{Name: cleaner.Name}, cleaner) } -func schedule(ctx context.Context, prunerScope *scope.PrunerScope, logger logr.Logger) (*time.Time, error) { - newLastRunTime := prunerScope.Pruner.Status.LastRunTime +func schedule(ctx context.Context, cleanerScope *scope.CleanerScope, logger logr.Logger) (*time.Time, error) { + newLastRunTime := cleanerScope.Cleaner.Status.LastRunTime now := time.Now() - nextRun, err := getNextScheduleTime(prunerScope.Pruner, now) + nextRun, err := getNextScheduleTime(cleanerScope.Cleaner, now) if err != nil { logger.Info("failed to get next run. Err: %v", err) return nil, err } var newNextScheduleTime *metav1.Time - if prunerScope.Pruner.Status.NextScheduleTime == nil { + if cleanerScope.Cleaner.Status.NextScheduleTime == nil { logger.Info("set NextScheduleTime") newNextScheduleTime = &metav1.Time{Time: *nextRun} } else { - if shouldSchedule(prunerScope.Pruner, logger) { + if shouldSchedule(cleanerScope.Cleaner, logger) { logger.Info("queuing job") executorClient := executor.GetClient() - executorClient.Process(ctx, prunerScope.Pruner.Name) + executorClient.Process(ctx, cleanerScope.Cleaner.Name) newLastRunTime = &metav1.Time{Time: now} } newNextScheduleTime = &metav1.Time{Time: *nextRun} } - prunerScope.SetLastRunTime(newLastRunTime) - prunerScope.SetNextScheduleTime(newNextScheduleTime) + cleanerScope.SetLastRunTime(newLastRunTime) + cleanerScope.SetNextScheduleTime(newNextScheduleTime) return nextRun, nil } // getNextScheduleTime gets the time of next schedule after last scheduled and before now -func getNextScheduleTime(pruner *appsv1alpha1.Pruner, now time.Time) (*time.Time, error) { - sched, err := cron.ParseStandard(pruner.Spec.Schedule) +func getNextScheduleTime(cleaner *appsv1alpha1.Cleaner, now time.Time) (*time.Time, error) { + sched, err := cron.ParseStandard(cleaner.Spec.Schedule) if err != nil { - return nil, fmt.Errorf("unparseable schedule %q: %w", pruner.Spec.Schedule, err) + return nil, fmt.Errorf("unparseable schedule %q: %w", cleaner.Spec.Schedule, err) } var earliestTime time.Time - if pruner.Status.LastRunTime != nil { - earliestTime = pruner.Status.LastRunTime.Time + if cleaner.Status.LastRunTime != nil { + earliestTime = cleaner.Status.LastRunTime.Time } else { // If none found, then this is a recently created snapshot - earliestTime = pruner.CreationTimestamp.Time + earliestTime = cleaner.CreationTimestamp.Time } - if pruner.Spec.StartingDeadlineSeconds != nil { + if cleaner.Spec.StartingDeadlineSeconds != nil { // controller is not going to schedule anything below this point - schedulingDeadline := now.Add(-time.Second * time.Duration(*pruner.Spec.StartingDeadlineSeconds)) + schedulingDeadline := now.Add(-time.Second * time.Duration(*cleaner.Spec.StartingDeadlineSeconds)) if schedulingDeadline.After(earliestTime) { earliestTime = 
schedulingDeadline @@ -241,21 +241,21 @@ func getNextScheduleTime(pruner *appsv1alpha1.Pruner, now time.Time) (*time.Time return &next, nil } -func shouldSchedule(pruner *appsv1alpha1.Pruner, logger logr.Logger) bool { +func shouldSchedule(cleaner *appsv1alpha1.Cleaner, logger logr.Logger) bool { now := time.Now() - logger.Info(fmt.Sprintf("currently next schedule is %s", pruner.Status.NextScheduleTime.Time)) + logger.Info(fmt.Sprintf("currently next schedule is %s", cleaner.Status.NextScheduleTime.Time)) - if now.Before(pruner.Status.NextScheduleTime.Time) { + if now.Before(cleaner.Status.NextScheduleTime.Time) { logger.Info("do not schedule yet") return false } // if last processed request was within 30 seconds, ignore it. // Avoid reprocessing spuriors back-to-back reconciliations - if pruner.Status.LastRunTime != nil { - logger.Info(fmt.Sprintf("last run was requested at %s", pruner.Status.LastRunTime)) + if cleaner.Status.LastRunTime != nil { + logger.Info(fmt.Sprintf("last run was requested at %s", cleaner.Status.LastRunTime)) const ignoreTimeInSecond = 30 - diff := now.Sub(pruner.Status.LastRunTime.Time) + diff := now.Sub(cleaner.Status.LastRunTime.Time) logger.Info(fmt.Sprintf("Elapsed time since last run in minutes %f", diff.Minutes())) return diff.Seconds() >= ignoreTimeInSecond @@ -264,7 +264,7 @@ func shouldSchedule(pruner *appsv1alpha1.Pruner, logger logr.Logger) bool { return true } -func removeQueuedJobs(prunerScope *scope.PrunerScope) { +func removeQueuedJobs(cleanerScope *scope.CleanerScope) { executorClient := executor.GetClient() - executorClient.RemoveEntries(prunerScope.Pruner.Name) + executorClient.RemoveEntries(cleanerScope.Cleaner.Name) } diff --git a/internal/controller/pruner_controller_test.go b/internal/controller/cleaner_controller_test.go similarity index 54% rename from internal/controller/pruner_controller_test.go rename to internal/controller/cleaner_controller_test.go index 98688e6..e0845bf 100644 --- a/internal/controller/pruner_controller_test.go +++ b/internal/controller/cleaner_controller_test.go @@ -30,18 +30,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" - "gianlucam76/k8s-pruner/internal/controller" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" + "gianlucam76/k8s-cleaner/internal/controller" ) -var _ = Describe("PrunerClient", func() { +var _ = Describe("CleanerClient", func() { AfterEach(func() { - pruners := &appsv1alpha1.PrunerList{} - Expect(k8sClient.List(context.TODO(), pruners)).To(Succeed()) + cleaners := &appsv1alpha1.CleanerList{} + Expect(k8sClient.List(context.TODO(), cleaners)).To(Succeed()) - for i := range pruners.Items { - pruner := pruners.Items[i] - Expect(k8sClient.Delete(context.TODO(), &pruner)).To(Succeed()) + for i := range cleaners.Items { + cleaner := cleaners.Items[i] + Expect(k8sClient.Delete(context.TODO(), &cleaner)).To(Succeed()) } }) @@ -49,11 +49,11 @@ var _ = Describe("PrunerClient", func() { now := time.Now() before := now.Add(-time.Second * 30) - pruner := &appsv1alpha1.Pruner{ + cleaner := &appsv1alpha1.Cleaner{ ObjectMeta: metav1.ObjectMeta{ Name: randomString(), }, - Status: appsv1alpha1.PrunerStatus{ + Status: appsv1alpha1.CleanerStatus{ NextScheduleTime: &metav1.Time{Time: before}, }, } @@ -62,39 +62,43 @@ var _ = Describe("PrunerClient", func() { logger, err := zap.NewDevelopment() Expect(err).To(BeNil()) - Expect(controller.ShouldSchedule(pruner, zapr.NewLogger(logger))).To(BeTrue()) + 
Expect(controller.ShouldSchedule(cleaner, zapr.NewLogger(logger))).To(BeTrue()) after := now.Add(time.Second * 30) - pruner.Status.NextScheduleTime = &metav1.Time{Time: after} + cleaner.Status.NextScheduleTime = &metav1.Time{Time: after} - Expect(controller.ShouldSchedule(pruner, zapr.NewLogger(logger))).To(BeFalse()) + Expect(controller.ShouldSchedule(cleaner, zapr.NewLogger(logger))).To(BeFalse()) }) - It("getNextScheduleTime returns the next time pruner should be scheduled", func() { + It("getNextScheduleTime returns the next time cleaner should be scheduled", func() { now := time.Now() + minute := now.Minute() + 1 + if minute == 60 { + minute = 0 + } - pruner := &appsv1alpha1.Pruner{ + cleaner := &appsv1alpha1.Cleaner{ ObjectMeta: metav1.ObjectMeta{ Name: randomString(), CreationTimestamp: metav1.Time{Time: now}, }, - Spec: appsv1alpha1.PrunerSpec{ - Schedule: fmt.Sprintf("%d * * * *", now.Minute()+1), + Spec: appsv1alpha1.CleanerSpec{ + Schedule: fmt.Sprintf("%d * * * *", minute), }, } - nextSchedule, err := controller.GetNextScheduleTime(pruner, now) + nextSchedule, err := controller.GetNextScheduleTime(cleaner, now) Expect(err).To(BeNil()) - Expect(nextSchedule.Minute()).To(Equal(now.Minute() + 1)) + Expect(nextSchedule.Minute()).To(Equal(minute)) }) It("addFinalizer adds finalizer", func() { - pruner := &appsv1alpha1.Pruner{ + cleaner := &appsv1alpha1.Cleaner{ ObjectMeta: metav1.ObjectMeta{ Name: randomString(), }, - Spec: appsv1alpha1.PrunerSpec{ - StaleResources: []appsv1alpha1.Resources{ + Spec: appsv1alpha1.CleanerSpec{ + MatchingResources: []appsv1alpha1.Resources{ { Kind: randomString(), Group: randomString(), @@ -105,18 +109,18 @@ var _ = Describe("PrunerClient", func() { }, } - Expect(k8sClient.Create(context.TODO(), pruner)).To(Succeed()) + Expect(k8sClient.Create(context.TODO(), cleaner)).To(Succeed()) - reconciler := &controller.PrunerReconciler{ + reconciler := &controller.CleanerReconciler{ Client: k8sClient, Scheme: testEnv.Scheme, } - Expect(controller.AddFinalizer(reconciler, context.TODO(), pruner, appsv1alpha1.PrunerFinalizer)).To(Succeed()) + Expect(controller.AddFinalizer(reconciler, context.TODO(), cleaner, appsv1alpha1.CleanerFinalizer)).To(Succeed()) - currentPruner := &appsv1alpha1.Pruner{} - Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: pruner.Name}, currentPruner)).To(Succeed()) + currentCleaner := &appsv1alpha1.Cleaner{} + Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: cleaner.Name}, currentCleaner)).To(Succeed()) - Expect(controllerutil.ContainsFinalizer(currentPruner, appsv1alpha1.PrunerFinalizer)).To(BeTrue()) + Expect(controllerutil.ContainsFinalizer(currentCleaner, appsv1alpha1.CleanerFinalizer)).To(BeTrue()) }) }) diff --git a/internal/controller/executor/prunerclient.go b/internal/controller/executor/client.go similarity index 85% rename from internal/controller/executor/prunerclient.go rename to internal/controller/executor/client.go index 9479e37..106c011 100644 --- a/internal/controller/executor/prunerclient.go +++ b/internal/controller/executor/client.go @@ -75,18 +75,18 @@ type Manager struct { mu *sync.Mutex - // A request represents a request to process a Pruner instance + // A request represents a request to process a Cleaner instance - // dirty contains all requests (pruner names) which are currently waiting to be served. + // dirty contains all requests (cleaner names) which are currently waiting to be served. 
dirty []string - // inProgress contains all requests (pruner names) that are currently being served. + // inProgress contains all requests (cleaner names) that are currently being served. inProgress []string - // jobQueue contains all requests (pruner names) that needs to be served. + // jobQueue contains all requests (cleaner names) that needs to be served. jobQueue []string - // results contains results for processed requests (pruner names) + // results contains results for processed requests (cleaner names) results map[string]error } @@ -137,12 +137,12 @@ func GetClient() *Manager { return managerInstance } -func (m *Manager) Process(ctx context.Context, prunerName string) { +func (m *Manager) Process(ctx context.Context, cleanerName string) { m.mu.Lock() defer m.mu.Unlock() - l := m.log.WithValues("pruner", prunerName) - key := prunerName + l := m.log.WithValues("cleaner", cleanerName) + key := cleanerName // Search if request is in dirty. Drop it if already there for i := range m.dirty { @@ -168,11 +168,11 @@ func (m *Manager) Process(ctx context.Context, prunerName string) { } m.log.V(logs.LogDebug).Info("request added to jobQueue") - m.jobQueue = append(m.jobQueue, prunerName) + m.jobQueue = append(m.jobQueue, cleanerName) } -func (m *Manager) GetResult(prunerName string) Result { - responseParam, err := getRequestStatus(prunerName) +func (m *Manager) GetResult(cleanerName string) Result { + responseParam, err := getRequestStatus(cleanerName) if err != nil { return Result{ ResultStatus: Unavailable, @@ -199,11 +199,11 @@ func (m *Manager) GetResult(prunerName string) Result { } } -func (m *Manager) RemoveEntries(prunerName string) { +func (m *Manager) RemoveEntries(cleanerName string) { m.mu.Lock() defer m.mu.Unlock() - key := prunerName + key := cleanerName for i := range m.inProgress { if m.inProgress[i] == key { diff --git a/internal/controller/executor/prunerclient_test.go b/internal/controller/executor/client_test.go similarity index 76% rename from internal/controller/executor/prunerclient_test.go rename to internal/controller/executor/client_test.go index 28a8606..4d2d23a 100644 --- a/internal/controller/executor/prunerclient_test.go +++ b/internal/controller/executor/client_test.go @@ -23,132 +23,132 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "gianlucam76/k8s-pruner/internal/controller/executor" + "gianlucam76/k8s-cleaner/internal/controller/executor" ) -var _ = Describe("PrunerClient", func() { +var _ = Describe("CleanerClient", func() { It("GetResult returns result when available", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - r := map[string]error{prunerName: nil} + r := map[string]error{cleanerName: nil} d.SetResults(r) Expect(len(d.GetResults())).To(Equal(1)) - result := d.GetResult(prunerName) + result := d.GetResult(cleanerName) Expect(result.Err).To(BeNil()) Expect(result.ResultStatus).To(Equal(executor.Processed)) }) It("GetResult returns result when available with error", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - r := map[string]error{prunerName: fmt.Errorf("failed to deploy")} + r := map[string]error{cleanerName: fmt.Errorf("failed to deploy")} d.SetResults(r) Expect(len(d.GetResults())).To(Equal(1)) - result := d.GetResult(prunerName) + result := d.GetResult(cleanerName) Expect(result.Err).ToNot(BeNil()) Expect(result.ResultStatus).To(Equal(executor.Failed)) }) It("GetResult returns InProgress when request is still queued (currently in progress)", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - d.SetInProgress([]string{prunerName}) + d.SetInProgress([]string{cleanerName}) Expect(len(d.GetInProgress())).To(Equal(1)) - result := d.GetResult(prunerName) + result := d.GetResult(cleanerName) Expect(result.Err).To(BeNil()) Expect(result.ResultStatus).To(Equal(executor.InProgress)) }) It("GetResult returns InProgress when request is still queued (currently queued)", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - d.SetJobQueue(prunerName) + d.SetJobQueue(cleanerName) Expect(len(d.GetJobQueue())).To(Equal(1)) - result := d.GetResult(prunerName) + result := d.GetResult(cleanerName) Expect(result.Err).To(BeNil()) Expect(result.ResultStatus).To(Equal(executor.InProgress)) }) It("GetResult returns Unavailable when request is not queued/in progress and result not available", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - result := d.GetResult(prunerName) + result := d.GetResult(cleanerName) Expect(result.Err).To(BeNil()) Expect(result.ResultStatus).To(Equal(executor.Unavailable)) }) It("Process does nothing if already in the dirty set", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - d.SetDirty([]string{prunerName}) + d.SetDirty([]string{cleanerName}) Expect(len(d.GetDirty())).To(Equal(1)) - d.Process(context.TODO(), prunerName) + d.Process(context.TODO(), cleanerName) Expect(len(d.GetDirty())).To(Equal(1)) Expect(len(d.GetInProgress())).To(Equal(0)) Expect(len(d.GetJobQueue())).To(Equal(0)) }) It("Process adds to inProgress", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - d.Process(context.TODO(), prunerName) + d.Process(context.TODO(), cleanerName) Expect(len(d.GetDirty())).To(Equal(1)) Expect(len(d.GetInProgress())).To(Equal(0)) Expect(len(d.GetJobQueue())).To(Equal(1)) }) It("Process if already in progress, does not add to 
jobQueue", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - d.SetInProgress([]string{prunerName}) + d.SetInProgress([]string{cleanerName}) Expect(len(d.GetInProgress())).To(Equal(1)) - d.Process(context.TODO(), prunerName) + d.Process(context.TODO(), cleanerName) Expect(len(d.GetDirty())).To(Equal(1)) Expect(len(d.GetInProgress())).To(Equal(1)) Expect(len(d.GetJobQueue())).To(Equal(0)) }) It("Process removes existing result", func() { - prunerName := randomString() + cleanerName := randomString() d := executor.GetClient() defer d.ClearInternalStruct() - r := map[string]error{prunerName: nil} + r := map[string]error{cleanerName: nil} d.SetResults(r) Expect(len(d.GetResults())).To(Equal(1)) - d.Process(context.TODO(), prunerName) + d.Process(context.TODO(), cleanerName) Expect(len(d.GetDirty())).To(Equal(1)) Expect(len(d.GetInProgress())).To(Equal(0)) Expect(len(d.GetJobQueue())).To(Equal(1)) diff --git a/internal/controller/executor/executor_suite_test.go b/internal/controller/executor/executor_suite_test.go index 6f18a48..d50254b 100644 --- a/internal/controller/executor/executor_suite_test.go +++ b/internal/controller/executor/executor_suite_test.go @@ -18,7 +18,7 @@ package executor_test import ( "context" - "gianlucam76/k8s-pruner/internal/controller/executor" + "gianlucam76/k8s-cleaner/internal/controller/executor" "path/filepath" "testing" "time" diff --git a/internal/controller/executor/export_test.go b/internal/controller/executor/export_test.go index dd59d68..3229f88 100644 --- a/internal/controller/executor/export_test.go +++ b/internal/controller/executor/export_test.go @@ -17,9 +17,9 @@ limitations under the License. package executor var ( - FetchResources = fetchResources - GetStaleResources = getStaleResources - DeleteStaleResources = deleteStaleResources + FetchResources = fetchResources + GetMatchingResources = getMatchingResources + DeleteMatchingResources = deleteMatchingResources ) func (m *Manager) ClearInternalStruct() { @@ -45,8 +45,8 @@ func (m *Manager) GetDirty() []string { return m.dirty } -func (m *Manager) SetJobQueue(prunerName string) { - m.jobQueue = []string{prunerName} +func (m *Manager) SetJobQueue(cleanerName string) { + m.jobQueue = []string{cleanerName} } func (m *Manager) GetJobQueue() []string { diff --git a/internal/controller/executor/worker.go b/internal/controller/executor/worker.go index 96d9de2..ad6a363 100644 --- a/internal/controller/executor/worker.go +++ b/internal/controller/executor/worker.go @@ -36,13 +36,13 @@ import ( "k8s.io/client-go/restmapper" "sigs.k8s.io/controller-runtime/pkg/client" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" libsveltosv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" logs "github.com/projectsveltos/libsveltos/lib/logsettings" ) -// A "request" represents a Pruner instance that needs to be processed. +// A "request" represents a Cleaner instance that needs to be processed. // // The flow is following: // - when a request arrives, it is first added to the dirty set or dropped if it already @@ -62,8 +62,8 @@ import ( // If the same request is also present in the dirty set, it is added back to the back of the jobQueue. 
type responseParams struct { - prunerName string - err error + cleanerName string + err error } var ( @@ -88,32 +88,32 @@ type transformStatus struct { func processRequests(ctx context.Context, i int, logger logr.Logger) { id := i - var prunerName *string + var cleanerName *string logger.V(logs.LogDebug).Info(fmt.Sprintf("started worker %d", id)) for { - if prunerName != nil { - l := logger.WithValues("pruner", prunerName) + if cleanerName != nil { + l := logger.WithValues("cleaner", cleanerName) // Get error only from getIsCleanupFromKey as same key is always used l.Info(fmt.Sprintf("worker: %d processing request", id)) - err := processPrunerInstance(ctx, *prunerName, l) - storeResult(*prunerName, err, l) + err := processCleanerInstance(ctx, *cleanerName, l) + storeResult(*cleanerName, err, l) l.Info(fmt.Sprintf("worker: %d request processed", id)) } - prunerName = nil + cleanerName = nil select { case <-time.After(1 * time.Second): managerInstance.mu.Lock() if len(managerInstance.jobQueue) > 0 { // take a request from queue and remove it from queue - prunerName = &managerInstance.jobQueue[0] + cleanerName = &managerInstance.jobQueue[0] managerInstance.jobQueue = managerInstance.jobQueue[1:] - l := logger.WithValues("pruner", prunerName) + l := logger.WithValues("cleaner", cleanerName) l.V(logs.LogDebug).Info("take from jobQueue") // Add to inProgress l.V(logs.LogDebug).Info("add to inProgress") - key := *prunerName + key := *cleanerName managerInstance.inProgress = append(managerInstance.inProgress, key) // If present remove from dirty for i := range managerInstance.dirty { @@ -132,37 +132,37 @@ func processRequests(ctx context.Context, i int, logger logr.Logger) { } } -func processPrunerInstance(ctx context.Context, prunerName string, logger logr.Logger) error { - pruner, err := getPrunerInstance(ctx, prunerName) +func processCleanerInstance(ctx context.Context, cleanerName string, logger logr.Logger) error { + cleaner, err := getCleanerInstance(ctx, cleanerName) if err != nil { - logger.Info(fmt.Sprintf("failed to get pruner instance: %v", err)) + logger.Info(fmt.Sprintf("failed to get cleaner instance: %v", err)) return err } - if pruner == nil { - logger.V(logs.LogDebug).Info("pruner instance not found") + if cleaner == nil { + logger.V(logs.LogDebug).Info("cleaner instance not found") return nil } - for i := range pruner.Spec.StaleResources { - sr := &pruner.Spec.StaleResources[i] + for i := range cleaner.Spec.MatchingResources { + sr := &cleaner.Spec.MatchingResources[i] var resources []*unstructured.Unstructured - resources, err = getStaleResources(ctx, sr, pruner.Spec.DryRun, logger) + resources, err = getMatchingResources(ctx, sr, cleaner.Spec.DryRun, logger) if err != nil { logger.Info(fmt.Sprintf("failed to fetch resource (gvk: %s): %v", fmt.Sprintf("%s:%s:%s", sr.Group, sr.Version, sr.Kind), err)) return err } if sr.Action == appsv1alpha1.ActionDelete { - return deleteStaleResources(ctx, resources, logger) + return deleteMatchingResources(ctx, resources, logger) } else { - return updateStaleResources(ctx, resources, sr.Transform, logger) + return updateMatchingResources(ctx, resources, sr.Transform, logger) } } return nil } -func getStaleResources(ctx context.Context, sr *appsv1alpha1.Resources, dryRun bool, logger logr.Logger, +func getMatchingResources(ctx context.Context, sr *appsv1alpha1.Resources, dryRun bool, logger logr.Logger, ) ([]*unstructured.Unstructured, error) { resources, err := fetchResources(ctx, sr) @@ -190,7 +190,7 @@ func getStaleResources(ctx 
context.Context, sr *appsv1alpha1.Resources, dryRun b return nil, err } if isMatch { - l.Info("resource is a match for pruner") + l.Info("resource is a match for cleaner") results = append(results, resource) } } @@ -198,7 +198,7 @@ func getStaleResources(ctx context.Context, sr *appsv1alpha1.Resources, dryRun b return results, nil } -func deleteStaleResources(ctx context.Context, resources []*unstructured.Unstructured, +func deleteMatchingResources(ctx context.Context, resources []*unstructured.Unstructured, logger logr.Logger) error { for i := range resources { @@ -215,7 +215,7 @@ func deleteStaleResources(ctx context.Context, resources []*unstructured.Unstruc return nil } -func updateStaleResources(ctx context.Context, resources []*unstructured.Unstructured, +func updateMatchingResources(ctx context.Context, resources []*unstructured.Unstructured, transform string, logger logr.Logger) error { for i := range resources { @@ -410,26 +410,26 @@ func Transform(resource *unstructured.Unstructured, script string, logger logr.L return result.Resource, nil } -func getPrunerInstance(ctx context.Context, prunerName string) (*appsv1alpha1.Pruner, error) { - pruner := &appsv1alpha1.Pruner{} - err := k8sClient.Get(ctx, types.NamespacedName{Name: prunerName}, pruner) +func getCleanerInstance(ctx context.Context, cleanerName string) (*appsv1alpha1.Cleaner, error) { + cleaner := &appsv1alpha1.Cleaner{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: cleanerName}, cleaner) if apierrors.IsNotFound(err) { err = nil } - return pruner, err + return cleaner, err } // storeResult does following: // - set results for further in time lookup // - remove request from inProgress // - if request is in dirty, remove it from there and add it to the back of the jobQueue -func storeResult(prunerName string, err error, logger logr.Logger) { +func storeResult(cleanerName string, err error, logger logr.Logger) { managerInstance.mu.Lock() defer managerInstance.mu.Unlock() - key := prunerName + key := cleanerName // Remove from inProgress for i := range managerInstance.inProgress { @@ -467,12 +467,12 @@ func storeResult(prunerName string, err error, logger logr.Logger) { // If result is available it returns the result. // If request is still queued, responseParams is nil and an error is nil. // If result is not available and request is neither queued nor already processed, it returns an error to indicate that. 
-func getRequestStatus(prunerName string) (*responseParams, error) { - logger := managerInstance.log.WithValues("pruner", prunerName) +func getRequestStatus(cleanerName string) (*responseParams, error) { + logger := managerInstance.log.WithValues("cleaner", cleanerName) managerInstance.mu.Lock() defer managerInstance.mu.Unlock() - key := prunerName + key := cleanerName logger.V(logs.LogDebug).Info("searching result") if _, ok := managerInstance.results[key]; ok { @@ -481,8 +481,8 @@ func getRequestStatus(prunerName string) (*responseParams, error) { logger.V(logs.LogDebug).Info("returning a response with an error") } resp := responseParams{ - prunerName: key, - err: managerInstance.results[key], + cleanerName: key, + err: managerInstance.results[key], } logger.V(logs.LogDebug).Info("removing result") delete(managerInstance.results, key) diff --git a/internal/controller/executor/worker_test.go b/internal/controller/executor/worker_test.go index d0581d6..b98a5b8 100644 --- a/internal/controller/executor/worker_test.go +++ b/internal/controller/executor/worker_test.go @@ -28,8 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" - "gianlucam76/k8s-pruner/internal/controller/executor" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" + "gianlucam76/k8s-cleaner/internal/controller/executor" libsveltosv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" ) @@ -65,13 +65,13 @@ var _ = Describe("Worker", func() { } Expect(k8sClient.Create(context.TODO(), secret)).To(Succeed()) - staleResources := &appsv1alpha1.Resources{ + matchingResources := &appsv1alpha1.Resources{ Kind: "Secret", Group: "", Version: "v1", } - list, err := executor.FetchResources(context.TODO(), staleResources) + list, err := executor.FetchResources(context.TODO(), matchingResources) Expect(err).To(BeNil()) Expect(len(list.Items)).To(Equal(1)) }) @@ -101,7 +101,7 @@ var _ = Describe("Worker", func() { } Expect(k8sClient.Create(context.TODO(), secret2)).To(Succeed()) - staleResources := &appsv1alpha1.Resources{ + matchingResources := &appsv1alpha1.Resources{ Kind: "Secret", Group: "", Version: "v1", @@ -114,13 +114,13 @@ var _ = Describe("Worker", func() { }, } - list, err := executor.FetchResources(context.TODO(), staleResources) + list, err := executor.FetchResources(context.TODO(), matchingResources) Expect(err).To(BeNil()) Expect(len(list.Items)).To(Equal(1)) Expect(list.Items[0].GetName()).To(Equal(secret2.Name)) }) - It("getStaleResources gets stale resources", func() { + It("getMatchingResources gets stale resources", func() { value := randomString() secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -161,7 +161,7 @@ var _ = Describe("Worker", func() { end ` - staleResources := &appsv1alpha1.Resources{ + matchingResources := &appsv1alpha1.Resources{ Kind: "Secret", Group: "", Version: "v1", @@ -169,7 +169,7 @@ var _ = Describe("Worker", func() { } logger, err := zap.NewDevelopment() Expect(err).To(BeNil()) - resources, err := executor.GetStaleResources(context.TODO(), staleResources, false, + resources, err := executor.GetMatchingResources(context.TODO(), matchingResources, false, zapr.NewLogger(logger)) Expect(err).To(BeNil()) Expect(resources).ToNot(BeNil()) diff --git a/internal/controller/export_test.go b/internal/controller/export_test.go index 20cd2f2..1a2da06 100644 --- a/internal/controller/export_test.go +++ b/internal/controller/export_test.go @@ -20,5 +20,5 @@ var ( ShouldSchedule = shouldSchedule 
GetNextScheduleTime = getNextScheduleTime - AddFinalizer = (*PrunerReconciler).addFinalizer + AddFinalizer = (*CleanerReconciler).addFinalizer ) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 0fdb6e6..11f079d 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -31,7 +31,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" //+kubebuilder:scaffold:imports ) diff --git a/manifest/manifest.yaml b/manifest/manifest.yaml index 62a9821..488817c 100644 --- a/manifest/manifest.yaml +++ b/manifest/manifest.yaml @@ -8,20 +8,20 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.13.0 - name: pruners.apps.projectsveltos.io + name: cleaners.apps.projectsveltos.io spec: group: apps.projectsveltos.io names: - kind: Pruner - listKind: PrunerList - plural: pruners - singular: pruner + kind: Cleaner + listKind: CleanerList + plural: cleaners + singular: cleaner scope: Cluster versions: - name: v1alpha1 schema: openAPIV3Schema: - description: Pruner is the Schema for the pruners API + description: Cleaner is the Schema for the cleaners API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -36,17 +36,14 @@ spec: metadata: type: object spec: - description: PrunerSpec defines the desired state of Pruner + description: CleanerSpec defines the desired state of Cleaner properties: dryRune: default: false description: DryRun if set to true, will have controller delete no resource. All matching resources will be listed in status section type: boolean - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - staleResources: + matchingResources: items: properties: action: @@ -113,6 +110,9 @@ spec: - version type: object type: array + schedule: + description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + type: string startingDeadlineSeconds: description: Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions @@ -120,11 +120,11 @@ spec: format: int64 type: integer required: + - matchingResources - schedule - - staleResources type: object status: - description: PrunerStatus defines the observed state of Pruner + description: CleanerStatus defines the observed state of Cleaner properties: failureMessage: description: FailureMessage provides more information about the error, @@ -151,18 +151,18 @@ kind: ServiceAccount metadata: labels: app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: controller-manager-sa app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: serviceaccount - app.kubernetes.io/part-of: k8s-pruner - name: k8s-pruner-controller + app.kubernetes.io/part-of: k8s-cleaner + name: k8s-cleaner-controller namespace: projectsveltos --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: k8s-pruner-controller-role + name: k8s-cleaner-controller-role rules: - apiGroups: - '*' @@ -173,7 +173,7 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners + - cleaners verbs: - create - delete @@ -185,13 +185,13 @@ rules: - apiGroups: - apps.projectsveltos.io resources: - - pruners/finalizers + - cleaners/finalizers verbs: - update - apiGroups: - apps.projectsveltos.io resources: - - pruners/status + - cleaners/status verbs: - get - patch @@ -202,12 +202,12 @@ kind: ClusterRole metadata: labels: app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: metrics-reader app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: clusterrole - app.kubernetes.io/part-of: k8s-pruner - name: k8s-pruner-metrics-reader + app.kubernetes.io/part-of: k8s-cleaner + name: k8s-cleaner-metrics-reader rules: - nonResourceURLs: - /metrics @@ -219,12 +219,12 @@ kind: ClusterRole metadata: labels: app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: proxy-role app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: clusterrole - app.kubernetes.io/part-of: k8s-pruner - name: k8s-pruner-proxy-role + app.kubernetes.io/part-of: k8s-cleaner + name: k8s-cleaner-proxy-role rules: - apiGroups: - authentication.k8s.io @@ -244,19 +244,19 @@ kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: manager-rolebinding app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/part-of: k8s-pruner - name: k8s-pruner-controller-rolebinding + app.kubernetes.io/part-of: k8s-cleaner + name: k8s-cleaner-controller-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: k8s-pruner-controller-role + name: k8s-cleaner-controller-role subjects: - kind: ServiceAccount - name: k8s-pruner-controller + name: k8s-cleaner-controller namespace: projectsveltos --- apiVersion: rbac.authorization.k8s.io/v1 @@ -264,19 +264,19 @@ kind: ClusterRoleBinding metadata: labels: app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: proxy-rolebinding app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/part-of: k8s-pruner - 
name: k8s-pruner-proxy-rolebinding + app.kubernetes.io/part-of: k8s-cleaner + name: k8s-cleaner-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: k8s-pruner-proxy-role + name: k8s-cleaner-proxy-role subjects: - kind: ServiceAccount - name: k8s-pruner-controller + name: k8s-cleaner-controller namespace: projectsveltos --- apiVersion: v1 @@ -284,13 +284,13 @@ kind: Service metadata: labels: app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: controller-manager-metrics-service app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: service - app.kubernetes.io/part-of: k8s-pruner - control-plane: k8s-pruner - name: k8s-pruner-controller-metrics-service + app.kubernetes.io/part-of: k8s-cleaner + control-plane: k8s-cleaner + name: k8s-cleaner-controller-metrics-service namespace: projectsveltos spec: ports: @@ -299,32 +299,32 @@ spec: protocol: TCP targetPort: https selector: - control-plane: k8s-pruner + control-plane: k8s-cleaner --- apiVersion: apps/v1 kind: Deployment metadata: labels: app.kubernetes.io/component: manager - app.kubernetes.io/created-by: k8s-pruner + app.kubernetes.io/created-by: k8s-cleaner app.kubernetes.io/instance: controller-manager app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: deployment - app.kubernetes.io/part-of: k8s-pruner - control-plane: k8s-pruner - name: k8s-pruner-controller + app.kubernetes.io/part-of: k8s-cleaner + control-plane: k8s-cleaner + name: k8s-cleaner-controller namespace: projectsveltos spec: replicas: 1 selector: matchLabels: - control-plane: k8s-pruner + control-plane: k8s-cleaner template: metadata: annotations: kubectl.kubernetes.io/default-container: controller labels: - control-plane: k8s-pruner + control-plane: k8s-cleaner spec: containers: - args: @@ -332,7 +332,7 @@ spec: - --metrics-bind-address=127.0.0.1:8080 command: - /manager - image: projectsveltos/k8s-pruner-amd64:main + image: projectsveltos/k8s-cleaner-amd64:main livenessProbe: httpGet: path: /healthz @@ -383,5 +383,5 @@ spec: - ALL securityContext: runAsNonRoot: true - serviceAccountName: k8s-pruner-controller + serviceAccountName: k8s-cleaner-controller terminationGracePeriodSeconds: 10 diff --git a/pkg/scope/pruner.go b/pkg/scope/pruner.go index a96f6f2..a70f12a 100644 --- a/pkg/scope/pruner.go +++ b/pkg/scope/pruner.go @@ -25,73 +25,73 @@ import ( "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" ) -// PrunerScopeParams defines the input parameters used to create a new Pruner Scope. -type PrunerScopeParams struct { +// CleanerScopeParams defines the input parameters used to create a new Cleaner Scope. +type CleanerScopeParams struct { Client client.Client Logger logr.Logger - Pruner *appsv1alpha1.Pruner + Cleaner *appsv1alpha1.Cleaner ControllerName string } -// NewPrunerScope creates a new Pruner Scope from the supplied parameters. +// NewCleanerScope creates a new Cleaner Scope from the supplied parameters. // This is meant to be called for each reconcile iteration. 
-func NewPrunerScope(params PrunerScopeParams) (*PrunerScope, error) { +func NewCleanerScope(params CleanerScopeParams) (*CleanerScope, error) { if params.Client == nil { - return nil, errors.New("client is required when creating a PrunerScope") + return nil, errors.New("client is required when creating a CleanerScope") } - if params.Pruner == nil { - return nil, errors.New("failed to generate new scope from nil Pruner") + if params.Cleaner == nil { + return nil, errors.New("failed to generate new scope from nil Cleaner") } - helper, err := patch.NewHelper(params.Pruner, params.Client) + helper, err := patch.NewHelper(params.Cleaner, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } - return &PrunerScope{ + return &CleanerScope{ Logger: params.Logger, client: params.Client, - Pruner: params.Pruner, + Cleaner: params.Cleaner, patchHelper: helper, controllerName: params.ControllerName, }, nil } -// PrunerScope defines the basic context for an actuator to operate upon. -type PrunerScope struct { +// CleanerScope defines the basic context for an actuator to operate upon. +type CleanerScope struct { logr.Logger client client.Client patchHelper *patch.Helper - Pruner *appsv1alpha1.Pruner + Cleaner *appsv1alpha1.Cleaner controllerName string } // PatchObject persists the feature configuration and status. -func (s *PrunerScope) PatchObject(ctx context.Context) error { +func (s *CleanerScope) PatchObject(ctx context.Context) error { return s.patchHelper.Patch( ctx, - s.Pruner, + s.Cleaner, ) } -// Close closes the current scope persisting the Pruner configuration and status. -func (s *PrunerScope) Close(ctx context.Context) error { +// Close closes the current scope persisting the Cleaner configuration and status. +func (s *CleanerScope) Close(ctx context.Context) error { return s.PatchObject(ctx) } // SetLastRunTime set LastRunTime field -func (s *PrunerScope) SetLastRunTime(lastRunTime *metav1.Time) { - s.Pruner.Status.LastRunTime = lastRunTime +func (s *CleanerScope) SetLastRunTime(lastRunTime *metav1.Time) { + s.Cleaner.Status.LastRunTime = lastRunTime } // SetNextScheduleTime sets NextScheduleTime field -func (s *PrunerScope) SetNextScheduleTime(lastRunTime *metav1.Time) { - s.Pruner.Status.NextScheduleTime = lastRunTime +func (s *CleanerScope) SetNextScheduleTime(lastRunTime *metav1.Time) { + s.Cleaner.Status.NextScheduleTime = lastRunTime } // SetFailureMessage sets FasilureMessage field -func (s *PrunerScope) SetFailureMessage(failureMessage *string) { - s.Pruner.Status.FailureMessage = failureMessage +func (s *CleanerScope) SetFailureMessage(failureMessage *string) { + s.Cleaner.Status.FailureMessage = failureMessage } diff --git a/test/fv/delete_test.go b/test/fv/delete_test.go index 38631bb..fe64b99 100644 --- a/test/fv/delete_test.go +++ b/test/fv/delete_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" ) var ( @@ -45,7 +45,7 @@ var ( end` ) -var _ = Describe("PrunerClient", func() { +var _ = Describe("CleanerClient", func() { const namePrefix = "delete-" It("Delete Action removes matching resources", Label("FV"), func() { ns := namePrefix + randomString() @@ -85,13 +85,18 @@ var _ = Describe("PrunerClient", func() { By(fmt.Sprintf("creating serviceAccount %s", serviceAccount2.Name)) Expect(k8sClient.Create(context.TODO(), serviceAccount2)).To(Succeed()) - // This Pruner 
matches ServiceAccount1 but does not match ServiceAccount2 - pruner := &appsv1alpha1.Pruner{ + minute := time.Now().Minute() + 1 + if minute == 60 { + minute = 0 + } + + // This Cleaner matches ServiceAccount1 but does not match ServiceAccount2 + cleaner := &appsv1alpha1.Cleaner{ ObjectMeta: metav1.ObjectMeta{ Name: randomString(), }, - Spec: appsv1alpha1.PrunerSpec{ - StaleResources: []appsv1alpha1.Resources{ + Spec: appsv1alpha1.CleanerSpec{ + MatchingResources: []appsv1alpha1.Resources{ { Kind: "ServiceAccount", Group: "", @@ -101,14 +106,14 @@ var _ = Describe("PrunerClient", func() { Action: appsv1alpha1.ActionDelete, }, }, - Schedule: fmt.Sprintf("%d * * * *", time.Now().Minute()+1), + Schedule: fmt.Sprintf("%d * * * *", minute), }, } - By(fmt.Sprintf("creating pruner %s", pruner.Name)) - Expect(k8sClient.Create(context.TODO(), pruner)).To(Succeed()) + By(fmt.Sprintf("creating cleaner %s", cleaner.Name)) + Expect(k8sClient.Create(context.TODO(), cleaner)).To(Succeed()) - // Pruner matches ServiceAccount1. This is then deleted + // Cleaner matches ServiceAccount1. This is then deleted Eventually(func() bool { currentServiceAccount := &corev1.ServiceAccount{} err := k8sClient.Get(context.TODO(), @@ -119,12 +124,12 @@ var _ = Describe("PrunerClient", func() { return apierrors.IsNotFound(err) }, timeout, pollingInterval).Should(BeTrue()) - // Pruner does not match ServiceAccount2. So this is *not* deleted + // Cleaner does not match ServiceAccount2. So this is *not* deleted currentServiceAccount := &corev1.ServiceAccount{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: serviceAccount2.Name}, currentServiceAccount)).To(Succeed()) - deletePruner(pruner.Name) + deleteCleaner(cleaner.Name) }) }) diff --git a/test/fv/fv_suite_test.go b/test/fv/fv_suite_test.go index 5956563..81f81f2 100644 --- a/test/fv/fv_suite_test.go +++ b/test/fv/fv_suite_test.go @@ -35,7 +35,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" ) var ( @@ -96,11 +96,11 @@ func randomString() string { return util.RandomString(length) } -func deletePruner(prunerName string) { - currentPruner := &appsv1alpha1.Pruner{} +func deleteCleaner(cleanerName string) { + currentCleaner := &appsv1alpha1.Cleaner{} Expect(k8sClient.Get(context.TODO(), - types.NamespacedName{Name: prunerName}, currentPruner)).To(Succeed()) + types.NamespacedName{Name: cleanerName}, currentCleaner)).To(Succeed()) - Expect(k8sClient.Delete(context.TODO(), currentPruner)).To(Succeed()) + Expect(k8sClient.Delete(context.TODO(), currentCleaner)).To(Succeed()) } diff --git a/test/fv/transform_test.go b/test/fv/transform_test.go index 59bcf71..4c0a0f6 100644 --- a/test/fv/transform_test.go +++ b/test/fv/transform_test.go @@ -19,7 +19,7 @@ package fv_test import ( "context" "fmt" - appsv1alpha1 "gianlucam76/k8s-pruner/api/v1alpha1" + appsv1alpha1 "gianlucam76/k8s-cleaner/api/v1alpha1" "time" . 
"github.com/onsi/ginkgo/v2" @@ -51,7 +51,7 @@ var ( end` ) -var _ = Describe("PrunerClient", func() { +var _ = Describe("CleanerClient", func() { const namePrefix = "transform-" It("Transform Action updates matching resources", Label("FV"), func() { ns := namePrefix + randomString() @@ -110,13 +110,18 @@ var _ = Describe("PrunerClient", func() { By(fmt.Sprintf("creating service %s", service2.Name)) Expect(k8sClient.Create(context.TODO(), service2)).To(Succeed()) - // This Pruner matches Service1 but does not match Service2 - pruner := &appsv1alpha1.Pruner{ + minute := time.Now().Minute() + 1 + if minute == 60 { + minute = 0 + } + + // This Cleaner matches Service1 but does not match Service2 + cleaner := &appsv1alpha1.Cleaner{ ObjectMeta: metav1.ObjectMeta{ Name: randomString(), }, - Spec: appsv1alpha1.PrunerSpec{ - StaleResources: []appsv1alpha1.Resources{ + Spec: appsv1alpha1.CleanerSpec{ + MatchingResources: []appsv1alpha1.Resources{ { Kind: "Service", Group: "", @@ -127,14 +132,14 @@ var _ = Describe("PrunerClient", func() { Action: appsv1alpha1.ActionTransform, }, }, - Schedule: fmt.Sprintf("%d * * * *", time.Now().Minute()+1), + Schedule: fmt.Sprintf("%d * * * *", minute), }, } - By(fmt.Sprintf("creating pruner %s", pruner.Name)) - Expect(k8sClient.Create(context.TODO(), pruner)).To(Succeed()) + By(fmt.Sprintf("creating cleaner %s", cleaner.Name)) + Expect(k8sClient.Create(context.TODO(), cleaner)).To(Succeed()) - // Pruner matches Service1. This is then updated + // Cleaner matches Service1. This is then updated Eventually(func() bool { currentService := &corev1.Service{} err := k8sClient.Get(context.TODO(), @@ -148,7 +153,7 @@ var _ = Describe("PrunerClient", func() { return currentService.Spec.Selector[key] == newValue }, timeout, pollingInterval).Should(BeTrue()) - // Pruner does not match ServiceAccount2. So this is *not* updated + // Cleaner does not match ServiceAccount2. So this is *not* updated currentService := &corev1.Service{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: service2.Name}, currentService)).To(Succeed()) @@ -156,6 +161,6 @@ var _ = Describe("PrunerClient", func() { _, ok := currentService.Spec.Selector[key] Expect(ok).To(BeFalse()) - deletePruner(pruner.Name) + deleteCleaner(cleaner.Name) }) })