Skip to content

Commit

Permalink
Merge pull request #2 from gianlucam76/rename
Browse files Browse the repository at this point in the history
Rename Pruner to Cleaner
  • Loading branch information
gianlucam76 authored Dec 9, 2023
2 parents 34b8f9d + 921dbd6 commit cc51d75
Show file tree
Hide file tree
Showing 41 changed files with 534 additions and 454 deletions.
28 changes: 14 additions & 14 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ SHELL = /usr/bin/env bash -o pipefail

# Define Docker related variables.
REGISTRY ?= projectsveltos
IMAGE_NAME ?= k8s-pruner
IMAGE_NAME ?= k8s-cleaner
ARCH ?= amd64
OS ?= $(shell uname -s | tr A-Z a-z)
K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
Expand Down Expand Up @@ -155,7 +155,7 @@ K8S_VERSION := v1.28.0
endif

KIND_CONFIG ?= kind-cluster.yaml
CONTROL_CLUSTER_NAME ?= pruner-management
CONTROL_CLUSTER_NAME ?= cleaner-management
TIMEOUT ?= 10m
NUM_NODES ?= 6

Expand All @@ -167,15 +167,15 @@ test: | check-manifests generate fmt vet $(SETUP_ENVTEST) ## Run uts.
kind-test: test create-cluster fv ## Build docker image; start kind cluster; load docker image; install all cluster api components and run fv

.PHONY: fv
fv: $(KUBECTL) $(GINKGO) ## Run Pruner Controller tests using existing cluster
fv: $(KUBECTL) $(GINKGO) ## Run Cleaner Controller tests using existing cluster
cd test/fv; $(GINKGO) -nodes $(NUM_NODES) --label-filter='FV' --v --trace --randomize-all

.PHONY: create-cluster
create-cluster: $(KIND) $(KUBECTL) $(ENVSUBST) ## Create a new kind cluster designed for development
$(MAKE) create-control-cluster

@echo "Start pruner"
$(MAKE) deploy-pruner
@echo "Start cleaner"
$(MAKE) deploy-cleaner

.PHONY: delete-cluster
delete-cluster: $(KIND) ## Deletes the kind cluster $(CONTROL_CLUSTER_NAME)
Expand All @@ -187,17 +187,17 @@ create-control-cluster: $(KIND) $(CLUSTERCTL) $(KUBECTL)
sed -e "s/K8S_VERSION/$(K8S_VERSION)/g" test/$(KIND_CONFIG) > test/$(KIND_CONFIG).tmp
$(KIND) create cluster --name=$(CONTROL_CLUSTER_NAME) --config test/$(KIND_CONFIG).tmp

deploy-pruner: $(KUSTOMIZE)
# Load pruner image into cluster
@echo 'Load pruner image into cluster'
deploy-cleaner: $(KUSTOMIZE)
# Load cleaner image into cluster
@echo 'Load cleaner image into cluster'
$(MAKE) load-image
# Install k8s-pruner components
@echo 'Install k8s-pruner components'
# Install k8s-cleaner components
@echo 'Install k8s-cleaner components'
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f-

@echo "Waiting for k8s-pruner to be available..."
$(KUBECTL) wait --for=condition=Available deployment/k8s-pruner-controller -n projectsveltos --timeout=$(TIMEOUT)
@echo "Waiting for k8s-cleaner to be available..."
$(KUBECTL) wait --for=condition=Available deployment/k8s-cleaner-controller -n projectsveltos --timeout=$(TIMEOUT)

set-manifest-image:
$(info Updating kustomize image patch file for manager resource)
Expand Down Expand Up @@ -247,10 +247,10 @@ uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specifi
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
deploy: manifests $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
deploy: manifests load-image $(KUSTOMIZE) $(KUBECTL) $(ENVSUBST) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f -

.PHONY: undeploy
undeploy: s $(KUSTOMIZE) ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
undeploy: $(KUSTOMIZE) ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
8 changes: 4 additions & 4 deletions PROJECT
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,16 @@
domain: projectsveltos.io
layout:
- go.kubebuilder.io/v4
projectName: k8s-pruner
repo: gianlucam76/k8s-pruner
projectName: k8s-cleaner
repo: gianlucam76/k8s-cleaner
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: projectsveltos.io
group: apps
kind: Pruner
path: gianlucam76/k8s-pruner/api/v1alpha1
kind: Cleaner
path: gianlucam76/k8s-cleaner/api/v1alpha1
version: v1alpha1
version: "3"
131 changes: 99 additions & 32 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,78 +1,100 @@
The Kubernetes controller __Pruner__ efficiently removes or updates stale resources in your cluster. It's designed to handle different types of resources and supports label filtering and Lua-based selection criteria.
The Kubernetes controller __Cleaner__ proactively identifies, removes, or updates stale resources to maintain a clean and efficient Kubernetes environment. It's designed to handle any Kubernetes resource types (including your own custom resources) and provides sophisticated filtering capabilities, including label-based selection and custom Lua-based criteria.

It provides a flexible and customizable approach to identifying and removing or updating outdated resources, helping to maintain a clean and efficient Kubernetes environment. The ability to select resources based on labels and utilize Lua-based selection criteria further enhances its applicability to various scenarios.
## Flexibility and Customization:

## Removing All Secrets
- **Schedule**: Specify the frequency at which the Cleaner should scan the cluster and identify stale resources. Utilize the Cron syntax to define recurring schedules.

- **DryRun**: Enable safe testing of the Cleaner's filtering logic without affecting actual resource configurations. Resources matching the criteria will be identified, but no changes will be applied.

- **Label Filtering**: Select resources based on user-defined labels, filtering out unwanted or outdated components. Refine the selection based on label key, operation (equal, different, etc.), and value.

- **Lua-based Selection Criteria**: Leverage Lua scripting to create complex and dynamic selection criteria, catering to specific resource management needs. Define custom logic to identify and handle stale resources.

## Maintaining a Clean and Efficient Cluster:

- **Resource Removal**: Efficiently remove stale resources from your cluster, reclaiming unused resources and improving resource utilization.

- **Resource Updates**: Update outdated resources to ensure they align with the latest configurations and maintain consistent functionality.

- **Reduced Resource Bloat**: Minimize resource bloat and maintain a clean and organized cluster, improving overall performance and stability.

By combining the flexibility of scheduling, the accuracy of label filtering, the power of Lua-based criteria, and the ability to remove or update stale resources, Cleaner empowers users to effectively manage their Kubernetes environments and optimize resource usage.

## Removing Unwanted Secrets

To remove all Secrets from the test namespace every day at 1 AM, use the following YAML configuration:

```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Pruner
kind: Cleaner
metadata:
name: pruner-sample
name: cleaner-sample
spec:
schedule: "* 1 * * *"
staleResources:
schedule: "* 1 * * *" # Runs every day at 1 AM
matchingResources:
- namespace: test
kind: Secret
group: ""
version: v1
action: Delete
action: Delete # Deletes matching Secrets
```
This configuration instructs the Cleaner to scan the test namespace every day at 1 AM, identify all Secrets, and effectively eliminate them, ensuring a clean and organized cluster.
## Selecting Resources with Label Filters
__Pruner__ can select resources based on their labels. For example, the following configuration removes all Deployment instances in the __test__ namespace that have both __serving=api__ and __environment!=production__ labels:
__Cleaner__ can select resources based on their labels, enabling precise resource management. For instance, to eliminate Deployments in the __test__ namespace with both ``serving=api`` and ``environment!=production`` labels, follow this YAML configuration:
```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Pruner
kind: Cleaner
metadata:
name: pruner-sample1
name: cleaner-sample1
spec:
schedule: "* 0 * * *"
staleResources:
schedule: "* 0 * * *" # Executes every day at midnight
matchingResources:
- namespace: test
kind: Deployment
group: "apps"
version: v1
labelFilters:
- key: serving
operation: Equal
value: api
value: api # Identifies Deployments with "serving" label set to "api"
- key: environment
operation: Different
value: production
action: Delete
value: production # Identifies Deployments with "environment" label different from "production"
action: Delete # Deletes matching Deployments
```
By utilizing label filters, you can refine the scope of resource management, ensuring that only specific resources are targeted for removal or update. This targeted approach helps maintain a clean and organized Kubernetes environment without affecting unintended resources.
## Using Lua for Advanced Selection
__Pruner__ allows you to define __Lua__ functions named ``evaluate`` for customized selection criteria. This function receives the resource object as obj.
__Cleaner__ extends its capabilities by enabling the use of __Lua__ scripts for defining advanced selection criteria. These Lua functions, named __evaluate__, receive the resource object as __obj__ and allow for complex and dynamic filtering rules.
For instance, the following configuration selects all Service instances in the foo namespace that expose port ``443`` or ``8443``:
For example, the following YAML configuration utilizes a Lua script to select all Services in the __foo__ namespace that expose port __443__ or __8443__:
```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Pruner
kind: Cleaner
metadata:
name: pruner-sample2
name: cleaner-sample2
spec:
schedule: "* 0 * * *"
staleResources:
matchingResources:
- namespace: foo
kind: Service
group: ""
version: v1
evaluate: |
function evaluate()
hs = {}
hs.matching = false
hs.matching = false -- Initialize matching flag
if obj.spec.ports ~= nil then
for _,p in pairs(obj.spec.ports) do
if p.port == 443 or p.port == 8443 then
hs.matching = true
for _,p in pairs(obj.spec.ports) do -- Iterate through the ports
if p.port == 443 or p.port == 8443 then -- Check if port is 443 or 8443
hs.matching = true -- Set matching flag to true
end
end
end
Expand All @@ -81,27 +103,31 @@ spec:
action: Delete
```
By leveraging Lua scripts, Cleaner empowers users to define complex and dynamic selection criteria, catering to specific resource management needs. This flexibility enables accurate and targeted identification of stale resources, ensuring effective resource utilization and maintenance of a clean Kubernetes environment.
## Updating Resources
Besides removing stale resources, __Pruner__ also enables you to update existing resources. This feature empowers you to dynamically modify resource configurations based on specific criteria. For instance, you can replace outdated labels with updated ones, or alter resource settings to align with changing requirements.
Beyond removing stale resources, __Cleaner__ also facilitates the dynamic updating of existing resource configurations. This capability allows you to modify resource specifications based on specific criteria, ensuring alignment with evolving requirements and maintaining resource consistency.
Consider the scenario where you want to update Service objects in the foo namespace to use __version2__ apps.
The __evaluate__ function allows you to select resources, Services in the __foo__ namespace pointing to ``version1`` apps.
The __transform__ function will change any such resource by updating ``obj.spec.selector["app"]`` to ``version2``.
1. The __evaluate__ function allows you to select resources, Services in the __foo__ namespace pointing to ``version1`` apps.
2. The __transform__ function will change any such resource by updating ``obj.spec.selector["app"]`` to ``version2``.
```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Pruner
kind: Cleaner
metadata:
name: pruner-sample3
name: cleaner-sample3
spec:
schedule: "* 0 * * *"
staleResources:
matchingResources:
- namespace: foo
kind: Service
group: ""
version: v1
evaluate: |
-- Define how resources will be selected
function evaluate()
hs = {}
hs.matching = false
Expand All @@ -112,12 +138,53 @@ spec:
end
return hs
end
action: Transform
action: Transform # Update matching resources
transform: |
-- Define how resources will be updated
function transform()
hs = {}
obj.spec.selector["app"] = "version2"
hs.resource = obj
return hs
end
```
## DryRun
To preview which resources match the __Cleaner__'s criteria, set the __DryRun__ flag to true. The Cleaner will still execute its logic but will not actually delete or update any resources. To identify matching resources, search the controller logs for the message "resource is a match for cleaner".
```yaml
apiVersion: apps.projectsveltos.io/v1alpha1
kind: Cleaner
metadata:
name: cleaner-sample1
spec:
schedule: "* 0 * * *" # Runs every day at midnight
dryRun: true # Set to true to preview matching resources
matchingResources:
- namespace: test
kind: Deployment
group: "apps"
version: v1
labelFilters:
- key: serving
operation: Equal
value: api # Match deployments with the "serving" label set to "api"
- key: environment
operation: Different
value: production # Match deployments with the "environment" label different from "production"
action: Delete
```
By setting DryRun to true, you can safely test the Cleaner's filtering logic without affecting your actual deployment configurations. Once you're confident in the filtering criteria, you can set DryRun back to false to enable automatic resource deletion.
## Schedule
The __schedule__ field specifies when the __Cleaner__ should run its logic to identify and potentially delete or update matching resources. It adheres to the Cron syntax, which is a widely adopted scheduling language for tasks and events.
The Cron syntax consists of five fields, separated by spaces, each representing a specific part of the scheduling period: minute, hour, day of month, month, and day of week, in that order.
It also accepts:
- Standard crontab specs, e.g. "* * * * ?"
- Descriptors, e.g. "@midnight", "@every 1h30m"
34 changes: 17 additions & 17 deletions api/v1alpha1/pruner_types.go → api/v1alpha1/cleaner_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@ const (
)

const (
// PrunerFinalizer allows Reconciler to clean up resources associated with
// Pruner instance before removing it from the apiserver.
PrunerFinalizer = "prunerfinalizer.projectsveltos.io"
// CleanerFinalizer allows Reconciler to clean up resources associated with
// Cleaner instance before removing it from the apiserver.
CleanerFinalizer = "cleanerfinalizer.projectsveltos.io"
)

type Resources struct {
Expand Down Expand Up @@ -82,9 +82,9 @@ type Resources struct {
Transform string `json:"transform,omitempty"`
}

// PrunerSpec defines the desired state of Pruner
type PrunerSpec struct {
StaleResources []Resources `json:"staleResources"`
// CleanerSpec defines the desired state of Cleaner
type CleanerSpec struct {
MatchingResources []Resources `json:"matchingResources"`

// Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
Schedule string `json:"schedule"`
Expand All @@ -101,8 +101,8 @@ type PrunerSpec struct {
DryRun bool `json:"dryRune,omitempty"`
}

// PrunerStatus defines the observed state of Pruner
type PrunerStatus struct {
// CleanerStatus defines the observed state of Cleaner
type CleanerStatus struct {
// Information when next snapshot is scheduled
// +optional
NextScheduleTime *metav1.Time `json:"nextScheduleTime,omitempty"`
Expand All @@ -117,27 +117,27 @@ type PrunerStatus struct {
}

//+kubebuilder:object:root=true
//+kubebuilder:resource:path=pruners,scope=Cluster
//+kubebuilder:resource:path=cleaners,scope=Cluster
//+kubebuilder:subresource:status

// Pruner is the Schema for the pruners API
type Pruner struct {
// Cleaner is the Schema for the cleaners API
type Cleaner struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

Spec PrunerSpec `json:"spec,omitempty"`
Status PrunerStatus `json:"status,omitempty"`
Spec CleanerSpec `json:"spec,omitempty"`
Status CleanerStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// PrunerList contains a list of Pruner
type PrunerList struct {
// CleanerList contains a list of Cleaner
type CleanerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Pruner `json:"items"`
Items []Cleaner `json:"items"`
}

func init() {
SchemeBuilder.Register(&Pruner{}, &PrunerList{})
SchemeBuilder.Register(&Cleaner{}, &CleanerList{})
}
Loading

0 comments on commit cc51d75

Please sign in to comment.