Rework all existing code to use new API to reconfigure Kubernetes components

ilia1243 committed Feb 1, 2024
1 parent 7696737 commit 5b7ce0d
Showing 7 changed files with 57 additions and 376 deletions.
7 changes: 2 additions & 5 deletions documentation/Maintenance.md
@@ -1032,7 +1032,6 @@ To avoid this, you need to specify custom policy and bind it using `ClusterRoleB
 
 The `manage_psp` procedure executes the following sequence of tasks:
 
-1. check_inventory
 1. delete_custom
 2. add_custom
 3. reconfigure_oob
@@ -1113,10 +1112,8 @@ application is stateless or stateful. Also shouldn't use `restart-pod: true` opt
 
 The `manage_pss` procedure executes the following sequence of tasks:
 
-1. check_inventory
-2. delete_default_pss
-3. apply_default_pss
-4. restart_pods
+1. manage_pss
+2. restart_pods
 
 ## Reboot Procedure

336 changes: 41 additions & 295 deletions kubemarine/admission.py

Large diffs are not rendered by default.
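
For reference, the object at the heart of the reworked PSS handling is the PodSecurity admission configuration that kube-apiserver loads via `--admission-control-config-file`. A minimal illustrative sketch of such a document follows; the levels and exemptions shown are assumptions, not necessarily what kubemarine writes:

import yaml

# Illustrative PodSecurity admission configuration; kubemarine's actual
# defaults and exemptions may differ.
admission_config = {
    'apiVersion': 'apiserver.config.k8s.io/v1',
    'kind': 'AdmissionConfiguration',
    'plugins': [{
        'name': 'PodSecurity',
        'configuration': {
            'apiVersion': 'pod-security.admission.config.k8s.io/v1',
            'kind': 'PodSecurityConfiguration',
            'defaults': {
                'enforce': 'baseline',
                'enforce-version': 'latest',
                'audit': 'baseline',
                'warn': 'baseline',
            },
            'exemptions': {'namespaces': ['kube-system']},
        },
    }],
}
print(yaml.dump(admission_config, default_flow_style=False))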

16 changes: 2 additions & 14 deletions kubemarine/k8s_certs.py
@@ -48,20 +48,8 @@ def renew_apply(control_planes: NodeGroup) -> None:
     kubernetes.copy_admin_config(log, control_planes)
 
     # for some reason simple pod delete do not work for certs update - we need to delete containers themselves
-    control_planes.call(force_restart_control_plane)
-
-    kubernetes.components.wait_for_pods(control_planes)
-
-
-def force_restart_control_plane(control_planes: NodeGroup) -> None:
-    cri_impl = control_planes.cluster.inventory['services']['cri']['containerRuntime']
-    restart_containers = ["etcd", "kube-scheduler", "kube-apiserver", "kube-controller-manager"]
-    c_filter = "grep -e %s" % " -e ".join(restart_containers)
-
-    if cri_impl == "docker":
-        control_planes.sudo("sudo docker container rm -f $(sudo docker ps -a | %s | awk '{ print $1 }')" % c_filter, warn=True)
-    else:
-        control_planes.sudo("sudo crictl rm -f $(sudo crictl ps -a | %s | awk '{ print $1 }')" % c_filter, warn=True)
+    control_planes.call(kubernetes.components.restart_components,
+                        components=["etcd", "kube-scheduler", "kube-apiserver", "kube-controller-manager"])
 
 
 def verify_all_is_absent_or_single(cert_list: List[str]) -> None:
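
Both the old and the new call sites use the `NodeGroup.call` convention visible above: the group is passed as the first argument and keyword arguments are forwarded. A greatly simplified sketch of that dispatch pattern (the real class in kubemarine.core.group does much more; `restart_components` below is a placeholder, not the real implementation):

from typing import Any, Callable, List


class NodeGroup:
    """Simplified stand-in for kubemarine.core.group.NodeGroup."""

    def call(self, fn: Callable[..., Any], **kwargs: Any) -> Any:
        # Invoke `fn` with this group as its first argument, forwarding
        # keyword arguments such as components=[...].
        return fn(self, **kwargs)


def restart_components(group: NodeGroup, components: List[str]) -> None:
    # Placeholder body: the real kubernetes.components.restart_components
    # restarts the named control-plane components on the group's nodes.
    print(f"restarting {components}")


NodeGroup().call(restart_components, components=["etcd", "kube-apiserver"])
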
40 changes: 1 addition & 39 deletions kubemarine/procedures/install.py
@@ -18,9 +18,6 @@
 from types import FunctionType
 from typing import Callable, List, Dict, cast
 
-import yaml
-import io
-
 from kubemarine.core.action import Action
 from kubemarine.core.cluster import KubernetesCluster
 from kubemarine.core.errors import KME
@@ -198,42 +195,7 @@ def deploy_kubernetes_audit(group: NodeGroup) -> None:
         return
 
     kubernetes.prepare_audit_policy(group)
-
-    for control_plane in group.get_ordered_members_list():
-        node_config = control_plane.get_config()
-        config_new = kubernetes.get_kubeadm_config(cluster.inventory)
-
-        # we need InitConfiguration in audit-on-config.yaml file to take into account kubeadm patch for apiserver
-        init_config = {
-            'apiVersion': cluster.inventory["services"]["kubeadm"]['apiVersion'],
-            'kind': 'InitConfiguration',
-            'localAPIEndpoint': {
-                'advertiseAddress': node_config['internal_address']
-            },
-            'patches': {
-                'directory': '/etc/kubernetes/patches'
-            }
-        }
-
-        config_new = config_new + "---\n" + yaml.dump(init_config, default_flow_style=False)
-
-        control_plane.put(io.StringIO(config_new), '/etc/kubernetes/audit-on-config.yaml', sudo=True)
-
-        kubernetes.create_kubeadm_patches_for_node(cluster, control_plane)
-
-        control_plane.sudo(f"kubeadm init phase control-plane apiserver "
-                           f"--config=/etc/kubernetes/audit-on-config.yaml ")
-
-        if cluster.inventory['services']['cri']['containerRuntime'] == 'containerd':
-            control_plane.call(utils.wait_command_successful,
-                               command="crictl rm -f $(sudo crictl ps --name kube-apiserver -q)")
-        else:
-            control_plane.call(utils.wait_command_successful,
-                               command="docker stop $(sudo docker ps -q -f 'name=k8s_kube-apiserver'"
-                                       " | awk '{print $1}')")
-        control_plane.call(utils.wait_command_successful, command="kubectl get pod -n kube-system")
-        control_plane.sudo("kubeadm init phase upload-config kubeadm "
-                           "--config=/etc/kubernetes/audit-on-config.yaml")
+    group.call(kubernetes.components.reconfigure_components, components=['kube-apiserver'])
 
 
 @_applicable_for_new_nodes_with_roles('all')
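
The deleted block assembled a two-document kubeadm config (the rendered ClusterConfiguration plus a per-node InitConfiguration pointing at the patches directory) and re-ran the control-plane phase by hand; `reconfigure_components` now hides that plumbing behind one call. A self-contained sketch of the document assembly, with illustrative values where the real code reads the inventory and node config:

import yaml

# Illustrative stand-ins; the real code takes both values from the
# cluster inventory and the node configuration.
kubeadm_api_version = 'kubeadm.k8s.io/v1beta3'
internal_address = '192.168.0.10'

init_config = {
    'apiVersion': kubeadm_api_version,
    'kind': 'InitConfiguration',
    'localAPIEndpoint': {
        'advertiseAddress': internal_address
    },
    'patches': {
        'directory': '/etc/kubernetes/patches'
    }
}

# Stand-in for kubernetes.get_kubeadm_config(cluster.inventory).
cluster_config = yaml.dump({
    'apiVersion': kubeadm_api_version,
    'kind': 'ClusterConfiguration',
}, default_flow_style=False)

# The removed code appended the InitConfiguration document exactly like
# this before uploading the result as /etc/kubernetes/audit-on-config.yaml.
config_new = cluster_config + "---\n" + yaml.dump(init_config, default_flow_style=False)
print(config_new)
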
1 change: 0 additions & 1 deletion kubemarine/procedures/manage_psp.py
@@ -23,7 +23,6 @@
 from kubemarine.core.resources import DynamicResources
 
 tasks = OrderedDict({
-    "check_inventory": admission.check_inventory,
     "delete_custom": admission.delete_custom_task,
     "add_custom": admission.add_custom_task,
     "reconfigure_oob": admission.reconfigure_oob_task,
4 changes: 1 addition & 3 deletions kubemarine/procedures/manage_pss.py
@@ -23,9 +23,7 @@
 from kubemarine.core.resources import DynamicResources
 
 tasks = OrderedDict({
-    "check_inventory": admission.check_inventory,
-    "delete_default_pss": admission.delete_default_pss,
-    "apply_default_pss": admission.apply_default_pss,
+    "manage_pss": admission.manage_pss,
     "restart_pods": admission.restart_pods_task
 })

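
Each procedure declares its task graph as an OrderedDict, and kubemarine's flow runner executes the entries in declaration order. A minimal sketch of that dispatch idea (the real runner in kubemarine.core.flow also handles filtering, logging, and failures; the names below are illustrative):

from collections import OrderedDict
from typing import Callable, Iterable, Optional


def run_tasks(tasks: 'OrderedDict[str, Callable[[str], None]]',
              cluster: str,
              only: Optional[Iterable[str]] = None) -> None:
    # Execute tasks in declaration order, optionally restricted to a
    # subset (in the spirit of the --tasks command-line filter).
    selected = set(only) if only is not None else None
    for name, task in tasks.items():
        if selected is None or name in selected:
            task(cluster)


demo_tasks = OrderedDict({
    'manage_pss': lambda cluster: print(f'manage_pss on {cluster}'),
    'restart_pods': lambda cluster: print(f'restart_pods on {cluster}'),
})
run_tasks(demo_tasks, 'demo-cluster')
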
29 changes: 10 additions & 19 deletions kubemarine/procedures/upgrade.py
Expand Up @@ -51,33 +51,24 @@ def prepull_images(cluster: KubernetesCluster) -> None:
def kubernetes_upgrade(cluster: KubernetesCluster) -> None:
initial_kubernetes_version = cluster.context['initial_kubernetes_version']

first_control_plane = cluster.nodes["control-plane"].get_first_member()
upgrade_group = kubernetes.get_group_for_upgrade(cluster)
preconfigure_components = []
if (admission.is_pod_security_unconditional(cluster)
and utils.version_key(initial_kubernetes_version)[0:2] < utils.minor_version_key("v1.28")
and cluster.inventory['rbac']['pss']['pod-security'] == 'enabled'):

cluster.log.debug("Updating kubeadm config map")
final_features_list = first_control_plane.call(admission.update_kubeadm_configmap_pss, target_state="enabled")
cluster.log.debug("Updating kube-apiserver configs on control-planes")
cluster.nodes["control-plane"].call(admission.update_kubeapi_config_pss, features_list=final_features_list)
# Extra args of API server have changed, need to reconfigure the API server.
# See enrich_inventory_pss()
preconfigure_components.append('kube-apiserver')

if (kubernetes.kube_proxy_overwrites_higher_system_values(cluster)
if (kubernetes.components.kube_proxy_overwrites_higher_system_values(cluster)
and utils.version_key(initial_kubernetes_version)[0:2] < utils.minor_version_key("v1.29")):
cluster.log.debug("Updating kube-proxy config map")
# Defaults of KubeProxyConfiguration have changed.
# See services.kubeadm_kube-proxy.conntrack.min section of defaults.yaml
preconfigure_components.append('kube-proxy')

def edit_kube_proxy_conntrack_min(kube_proxy_cm: dict) -> dict:
expected_conntrack: dict = cluster.inventory['services']['kubeadm_kube-proxy']['conntrack']
if 'min' not in expected_conntrack:
return kube_proxy_cm

actual_conntrack = kube_proxy_cm['conntrack']
if expected_conntrack['min'] != actual_conntrack.get('min'):
actual_conntrack['min'] = expected_conntrack['min']

return kube_proxy_cm

first_control_plane.call(kubernetes.reconfigure_kube_proxy_configmap, mutate_func=edit_kube_proxy_conntrack_min)
if preconfigure_components:
upgrade_group.call(kubernetes.components.reconfigure_components, components=preconfigure_components)

drain_timeout = cluster.procedure_inventory.get('drain_timeout')
grace_period = cluster.procedure_inventory.get('grace_period')
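
The two guards above gate reconfiguration on the minor version boundary being crossed during the upgrade. A self-contained sketch of that comparison, with simplified stand-ins for utils.version_key and utils.minor_version_key (the real helpers may parse versions differently):

from typing import List, Tuple


def version_key(version: str) -> Tuple[int, ...]:
    # "v1.28.3" -> (1, 28, 3); simplified stand-in.
    return tuple(int(part) for part in version.lstrip('v').split('.'))


def minor_version_key(version: str) -> Tuple[int, int]:
    # "v1.29" -> (1, 29); simplified stand-in.
    major, minor = version.lstrip('v').split('.')[:2]
    return int(major), int(minor)


preconfigure_components: List[str] = []
initial_kubernetes_version = 'v1.28.3'

# kube-proxy defaults changed in v1.29 in this example, so upgrading
# from anything older must reconfigure kube-proxy.
if version_key(initial_kubernetes_version)[0:2] < minor_version_key('v1.29'):
    preconfigure_components.append('kube-proxy')

print(preconfigure_components)  # ['kube-proxy']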
