add systemd services for configuration after start
The services perform the various tasks needed to set up the OCP or
MicroShift cluster. These systemd units run small shell scripts which
are based on:
https://github.com/crc-org/crc-cloud/blob/main/pkg/bundle/setup/clustersetup.sh

and do the following tasks:
- create CRC-specific configuration for dnsmasq
- set a new UUID as the cluster ID
- create the pod for the routes-controller
- try to grow the disk and filesystem
- check whether the cluster operators are ready
- add the pull secret to the cluster
- set the kubeadmin and developer user passwords
- set a custom CA for authentication
- set a custom nip.io cluster domain
anjannath committed Jan 7, 2025
1 parent 914f90f commit a62f49b
Showing 25 changed files with 414 additions and 0 deletions.
21 changes: 21 additions & 0 deletions createdisk-library.sh
@@ -216,6 +216,7 @@ function prepare_hyperV() {
echo 'CONST{virt}=="microsoft", RUN{builtin}+="kmod load hv_sock"' > /etc/udev/rules.d/90-crc-vsock.rules
EOF
}

function prepare_qemu_guest_agent() {
local vm_ip=$1

@@ -400,3 +401,23 @@ function remove_pull_secret_from_disk() {
esac
}

function copy_systemd_units() {
    ${SSH} core@${VM_IP} -- 'mkdir -p /home/core/systemd-units && mkdir -p /home/core/systemd-scripts'
    ${SCP} systemd/crc-*.service core@${VM_IP}:/home/core/systemd-units/
    ${SCP} systemd/crc-*.path core@${VM_IP}:/home/core/systemd-units/
    ${SCP} systemd/crc-*.sh core@${VM_IP}:/home/core/systemd-scripts/

    case "${BUNDLE_TYPE}" in
        "snc"|"okd")
            ${SCP} systemd/ocp-*.service core@${VM_IP}:/home/core/systemd-units/
            ${SCP} systemd/ocp-*.path core@${VM_IP}:/home/core/systemd-units/
            ${SCP} systemd/ocp-*.sh core@${VM_IP}:/home/core/systemd-scripts/
            ;;
    esac

    ${SSH} core@${VM_IP} -- 'sudo cp /home/core/systemd-units/* /etc/systemd/system/ && sudo cp /home/core/systemd-scripts/* /usr/local/bin/'
    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-scripts/ | xargs -t -I % sudo chmod +x /usr/local/bin/%'
    ${SSH} core@${VM_IP} -- 'sudo restorecon -rv /usr/local/bin'
    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-units/ | xargs sudo systemctl enable'
    ${SSH} core@${VM_IP} -- 'rm -rf /home/core/systemd-units /home/core/systemd-scripts'
}
2 changes: 2 additions & 0 deletions createdisk.sh
@@ -130,6 +130,8 @@ if [ "${ARCH}" == "aarch64" ] && [ ${BUNDLE_TYPE} != "okd" ]; then
    ${SSH} core@${VM_IP} -- "sudo rpm-ostree install https://kojipkgs.fedoraproject.org//packages/qemu/8.2.6/3.fc40/aarch64/qemu-user-static-x86-8.2.6-3.fc40.aarch64.rpm"
fi

copy_systemd_units

cleanup_vm_image ${VM_NAME} ${VM_IP}

# Delete all the pods and lease from the etcd db so that when this bundle is use for the cluster provision, everything comes up in clean state.
24 changes: 24 additions & 0 deletions notes-self-sufficient.md
@@ -0,0 +1,24 @@
# Self-sufficient bundles

Since release 4.19.0 of OpenShift Local, the bundles generated by `snc` contain additional systemd services that provision the cluster, removing the need for
an outside entity to do the provisioning. An outside process still needs to create some files at pre-defined locations inside the VM for the systemd
services to do their work.

## Systemd services and the input files they need to provision the cluster

Users of SNC need to create these files; a sketch of how an outside process might stage them follows the table below.

| Systemd unit | runs for (ocp, microshift, both) | Input files location | Marker env variables |
| :-------------------: | :------------------------------: | :----------------------------------: | :------------------: |
| crc-cluster-status | both | none | none |
| crc-pullsecret | both | /opt/crc/pull-secret | none |
| crc-dnsmasq | both | none | none |
| crc-routes-controller | both | none | none |
| ocp-cluster-ca | ocp | /opt/crc/custom-ca.crt | CRC_CLOUD=1 |
| ocp-clusterid | ocp | none | none |
| ocp-custom-domain | ocp | none | CRC_CLOUD=1 |
| ocp-growfs | ocp | none | none |
| ocp-userpasswords | ocp | /opt/crc/pass_{kubeadmin, developer} | none |

> [!NOTE]
> A "marker env variable" is set using an env file; if the required env variable is not set, the unit is skipped.
> Some units run only when CRC_CLOUD=1 is set; these are needed only when using the bundles with crc-cloud.
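
As an illustration, the sketch below shows how an outside process could stage these inputs over SSH before the units trigger. The file paths come from the table above; the VM address, the `core` user, and the secret/password values (and their plain-text format) are assumptions made for this example, not part of the bundle contract.

```bash
#!/bin/bash
# Hypothetical staging script -- adjust the address, credentials and file contents.
set -euo pipefail

VM_IP=192.168.126.11   # placeholder address of the VM booted from the bundle

ssh "core@${VM_IP}" 'sudo mkdir -p /opt/crc'

# Pull secret, picked up by crc-pullsecret.path -> crc-pullsecret.service
scp pull-secret.json "core@${VM_IP}:/tmp/pull-secret"
ssh "core@${VM_IP}" 'sudo mv /tmp/pull-secret /opt/crc/pull-secret'

# kubeadmin and developer passwords, read by ocp-userpasswords
ssh "core@${VM_IP}" 'echo "kubeadmin-pass-placeholder" | sudo tee /opt/crc/pass_kubeadmin'
ssh "core@${VM_IP}" 'echo "developer-pass-placeholder" | sudo tee /opt/crc/pass_developer'

# Custom CA certificate, watched by ocp-cluster-ca.path
scp custom-ca.crt "core@${VM_IP}:/tmp/custom-ca.crt"
ssh "core@${VM_IP}" 'sudo mv /tmp/custom-ca.crt /opt/crc/custom-ca.crt'

# Marker env file: only needed under crc-cloud; enables the units gated on CRC_CLOUD=1
# (ocp-custom-domain, ocp-cluster-ca)
ssh "core@${VM_IP}" 'echo "CRC_CLOUD=1" | sudo tee /opt/crc/crc-cloud'
```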
12 changes: 12 additions & 0 deletions systemd/crc-cluster-status.service
@@ -0,0 +1,12 @@
[Unit]
Description=CRC Unit checking if cluster is ready
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/crc-cluster-status.sh
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
43 changes: 43 additions & 0 deletions systemd/crc-cluster-status.sh
@@ -0,0 +1,43 @@
#!/bin/bash

set -x

export KUBECONFIG=/opt/kubeconfig

function check_cluster_healthy() {
    WAIT="authentication|console|etcd|ingress|openshift-apiserver"

    until oc get co > /dev/null 2>&1
    do
        sleep 2
    done

    for i in $(oc get co | grep -P "$WAIT" | awk '{ print $3 }')
    do
        if [[ $i == "False" ]]
        then
            return 1
        fi
    done
    return 0
}

# rm -rf /tmp/.crc-cluster-ready

COUNTER=0
CLUSTER_HEALTH_SLEEP=8
CLUSTER_HEALTH_RETRIES=500

while ! check_cluster_healthy
do
    sleep $CLUSTER_HEALTH_SLEEP
    if [[ $COUNTER == $CLUSTER_HEALTH_RETRIES ]]
    then
        exit 1
    fi
    ((COUNTER++))
done

# need to set a marker to let `crc` know the cluster is ready
# touch /tmp/.crc-cluster-ready

13 changes: 13 additions & 0 deletions systemd/crc-dnsmasq.service
@@ -0,0 +1,13 @@
[Unit]
Description=CRC Unit for configuring dnsmasq
Requires=ovs-configuration.service
After=ovs-configuration.service

[Service]
Type=oneshot
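# Skip this unit when the tap0 interface exists (presumably the user-mode networking case, where this dnsmasq configuration is not needed)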
ExecCondition=/usr/bin/bash -c "/usr/sbin/ip link show dev tap0 && exit 1 || exit 0"
ExecStart=/usr/local/bin/crc-dnsmasq.sh
ExecStartPost=/usr/bin/systemctl start dnsmasq.service

[Install]
WantedBy=multi-user.target
19 changes: 19 additions & 0 deletions systemd/crc-dnsmasq.sh
@@ -0,0 +1,19 @@
#!/bin/bash

set -x

hostName=$(hostname)
hostIp=$(hostname --all-ip-addresses | awk '{print $1}')

cat << EOF > /etc/dnsmasq.d/crc-dnsmasq.conf
interface=br-ex
expand-hosts
log-queries
local=/crc.testing/
domain=crc.testing
address=/apps-crc.testing/$hostIp
address=/api.crc.testing/$hostIp
address=/api-int.crc.testing/$hostIp
address=/$hostName.crc.testing/$hostIp
EOF

11 changes: 11 additions & 0 deletions systemd/crc-pullsecret.path
@@ -0,0 +1,11 @@
[Unit]
Description=CRC Unit for monitoring the pull secret path
After=kubelet.service

[Path]
PathExists=/opt/crc/pull-secret
TriggerLimitIntervalSec=1min
TriggerLimitBurst=0

[Install]
WantedBy=multi-user.target
11 changes: 11 additions & 0 deletions systemd/crc-pullsecret.service
@@ -0,0 +1,11 @@
[Unit]
Description=CRC Unit for adding pull secret to cluster
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/crc-pullsecret.sh

[Install]
WantedBy=multi-user.target
21 changes: 21 additions & 0 deletions systemd/crc-pullsecret.sh
@@ -0,0 +1,21 @@
#!/bin/bash

set -x

source /usr/local/bin/crc-systemd-common.sh
export KUBECONFIG="/opt/kubeconfig"

wait_for_resource secret

# check if the existing pull-secret is valid; if not, add the one from /opt/crc/pull-secret
existingPsB64=$(oc get secret pull-secret -n openshift-config -o jsonpath="{['data']['\.dockerconfigjson']}")
existingPs=$(echo "${existingPsB64}" | base64 -d)

echo "${existingPs}" | jq -e '.auths'

if [[ $? != 0 ]]; then
    pullSecretB64=$(cat /opt/crc/pull-secret | base64 -w0)
    oc patch secret pull-secret -n openshift-config --type merge -p "{\"data\":{\".dockerconfigjson\":\"${pullSecretB64}\"}}"
    rm -f /opt/crc/pull-secret
fi

12 changes: 12 additions & 0 deletions systemd/crc-routes-controller.service
@@ -0,0 +1,12 @@
[Unit]
Description=CRC Unit starting routes controller
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
ExecCondition=/usr/bin/bash -c "/usr/bin/ping -c1 gateway && exit 1 || exit 0"
ExecStart=/usr/local/bin/crc-routes-controller.sh

[Install]
WantedBy=multi-user.target
11 changes: 11 additions & 0 deletions systemd/crc-routes-controller.sh
@@ -0,0 +1,11 @@
#!/bin/bash

set -x

source /usr/local/bin/crc-systemd-common.sh
export KUBECONFIG=/opt/kubeconfig

wait_for_resource pods

oc apply -f /opt/crc/routes-controller.yaml

12 changes: 12 additions & 0 deletions systemd/crc-systemd-common.sh
@@ -0,0 +1,12 @@
# $1 is the resource to check
# $2 is an optional maximum retry count; default 20
function wait_for_resource() {
    local retry=0
    local max_retry=${2:-20}
    until oc get "$1" > /dev/null 2>&1
    do
        [ $retry == $max_retry ] && exit 1
        sleep 5
        ((retry++))
    done
}
11 changes: 11 additions & 0 deletions systemd/ocp-cluster-ca.path
@@ -0,0 +1,11 @@
[Unit]
Description=CRC Unit monitoring custom-ca.crt file path
After=kubelet.service

[Path]
PathExists=/opt/crc/custom-ca.crt
TriggerLimitIntervalSec=1min
TriggerLimitBurst=0

[Install]
WantedBy=multi-user.target
11 changes: 11 additions & 0 deletions systemd/ocp-cluster-ca.service
@@ -0,0 +1,11 @@
[Unit]
Description=CRC Unit setting custom cluster ca
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/ocp-cluster-ca.sh

[Install]
WantedBy=multi-user.target
26 changes: 26 additions & 0 deletions systemd/ocp-cluster-ca.sh
@@ -0,0 +1,26 @@
#!/bin/bash

set -x

source /usr/local/bin/crc-systemd-common.sh
export KUBECONFIG="/opt/kubeconfig"

wait_for_resource configmap

custom_ca_path=/opt/crc/custom-ca.crt

# retry=0
# max_retry=20
# until `ls ${custom_ca_path} > /dev/null 2>&1`
# do
# [ $retry == $max_retry ] && exit 1
# sleep 5
# ((retry++))
# done

oc create configmap client-ca-custom -n openshift-config --from-file=ca-bundle.crt=${custom_ca_path}
oc patch apiserver cluster --type=merge -p '{"spec": {"clientCA": {"name": "client-ca-custom"}}}'
oc create configmap admin-kubeconfig-client-ca -n openshift-config --from-file=ca-bundle.crt=${custom_ca_path} \
--dry-run -o yaml | oc replace -f -

rm -f /opt/crc/custom-ca.crt
11 changes: 11 additions & 0 deletions systemd/ocp-clusterid.service
@@ -0,0 +1,11 @@
[Unit]
Description=CRC Unit setting random cluster ID
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/ocp-clusterid.sh

[Install]
WantedBy=multi-user.target
11 changes: 11 additions & 0 deletions systemd/ocp-clusterid.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#!/bin/bash

set -x

source /usr/local/bin/crc-systemd-common.sh
export KUBECONFIG="/opt/kubeconfig"
uuid=$(uuidgen)

wait_for_resource clusterversion

oc patch clusterversion version -p "{\"spec\":{\"clusterID\":\"${uuid}\"}}" --type merge
12 changes: 12 additions & 0 deletions systemd/ocp-custom-domain.service
@@ -0,0 +1,12 @@
[Unit]
Description=CRC Unit setting nip.io domain for cluster
After=kubelet.service
Requires=kubelet.service

[Service]
Type=oneshot
EnvironmentFile=/opt/crc/crc-cloud
ExecStart=/usr/local/bin/ocp-custom-domain.sh

[Install]
WantedBy=multi-user.target
47 changes: 47 additions & 0 deletions systemd/ocp-custom-domain.sh
@@ -0,0 +1,47 @@
#!/bin/bash

set -x

if [ -z "${CRC_CLOUD}" ]; then
    echo "Not running in crc-cloud mode"
    exit 0
fi

source /usr/local/bin/crc-systemd-common.sh
export KUBECONFIG="/opt/kubeconfig"
export EIP=$(hostname -i)

STEPS_SLEEP_TIME=30

wait_for_resource secret

# create cert and add as secret
openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout nip.key -out nip.crt -subj "/CN=$EIP.nip.io" -addext "subjectAltName=DNS:apps.$EIP.nip.io,DNS:*.apps.$EIP.nip.io,DNS:api.$EIP.nip.io"
oc create secret tls nip-secret --cert=nip.crt --key=nip.key -n openshift-config
sleep $STEPS_SLEEP_TIME

# patch ingress
cat <<EOF > ingress-patch.yaml
spec:
  appsDomain: apps.$EIP.nip.io
  componentRoutes:
    - hostname: console-openshift-console.apps.$EIP.nip.io
      name: console
      namespace: openshift-console
      servingCertKeyPairSecret:
        name: nip-secret
    - hostname: oauth-openshift.apps.$EIP.nip.io
      name: oauth-openshift
      namespace: openshift-authentication
      servingCertKeyPairSecret:
        name: nip-secret
EOF
oc patch ingresses.config.openshift.io cluster --type=merge --patch-file=ingress-patch.yaml

# patch API server to use new CA secret
oc patch apiserver cluster --type=merge -p '{"spec":{"servingCerts": {"namedCertificates":[{"names":["api.'$EIP'.nip.io"],"servingCertificate": {"name": "nip-secret"}}]}}}'

# patch image registry route
oc patch -p '{"spec": {"host": "default-route-openshift-image-registry.'$EIP'.nip.io"}}' route default-route -n openshift-image-registry --type=merge

#wait_cluster_become_healthy "authentication|console|etcd|ingress|openshift-apiserver"
9 changes: 9 additions & 0 deletions systemd/ocp-growfs.service
@@ -0,0 +1,9 @@
[Unit]
Description=CRC Unit to grow the root filesystem

[Service]
Type=oneshot
ExecStart=/usr/local/bin/ocp-growfs.sh

[Install]
WantedBy=multi-user.target
11 changes: 11 additions & 0 deletions systemd/ocp-growfs.sh
@@ -0,0 +1,11 @@
#!/bin/bash

set -x

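# blkid reports the xfs root partition (typically something like /dev/vda4);
# growpart expects the disk device and the partition number as separate arguments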
root_partition=$(/usr/sbin/blkid -t TYPE=xfs -o device)
/usr/bin/growpart "${root_partition%?}" "${root_partition#/dev/???}"

rootFS="/sysroot"
mount -o remount,rw "${rootFS}"
xfs_growfs "${rootFS}"
#mount -o remount,ro "${rootFS}"
