
Commit
dbg: multus failure
usrbinkat committed Mar 25, 2024
1 parent 31bba07 commit fb72f40
Showing 24 changed files with 1,238 additions and 153 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -19,3 +19,4 @@ Pulumi.*.yaml
tmp.*
tmp
.ssh
tmp
15 changes: 14 additions & 1 deletion Makefile
@@ -38,7 +38,7 @@ $(warning GITHUB_TOKEN is not set)
endif

# --- Targets ---
.PHONY: help detect-arch pulumi-login pulumi-up up talos-gen-config talos-cluster kind-cluster clean clean-all act konductor test-kind test-talos stop
.PHONY: help detect-arch pulumi-login pulumi-up up talos-gen-config talos-cluster kind-cluster clean clean-all act konductor test-kind test-talos stop force-terminating-ns

# --- Default Command ---
all: help
@@ -234,3 +234,16 @@ stop: clean
@echo "Stopping Codespaces..."
@gh codespace --codespace ${CODESPACE_NAME} stop
@echo "Codespaces stopped."

# --- Force-Clear Terminating Namespaces ---
force-terminating-ns:
@kubectl proxy & \
PROXY_PID=$$! ;\
sleep 2 ;\
kubectl get namespaces --field-selector=status.phase=Terminating -o json | jq -r '.items[].metadata.name' | while read NAMESPACE ; do \
echo "Clearing finalizers for namespace $$NAMESPACE" ;\
kubectl get namespace $$NAMESPACE -o json | jq '.spec = {"finalizers":[]}' > temp-$$NAMESPACE.json ;\
curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp-$$NAMESPACE.json 127.0.0.1:8001/api/v1/namespaces/$$NAMESPACE/finalize ;\
rm -f temp-$$NAMESPACE.json ;\
done ;\
kill $$PROXY_PID
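
A note on the target above: it starts a local kubectl proxy, rewrites each namespace stuck in the Terminating phase with an empty spec.finalizers list, and PUTs the result back through the namespace's /finalize subresource, which force-releases it. A minimal single-namespace sketch of the same cleanup, assuming jq is installed and using a hypothetical namespace name; kubectl replace --raw talks to the API server directly, so no proxy is needed:

# Force-clear finalizers on one stuck namespace (hypothetical name "stuck-ns")
kubectl get namespace stuck-ns -o json \
  | jq '.spec = {"finalizers":[]}' \
  | kubectl replace --raw "/api/v1/namespaces/stuck-ns/finalize" -f -

Clearing finalizers bypasses whatever cleanup they guarded, so orphaned resources are possible; treat this as a debugging last resort rather than routine teardown.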
83 changes: 42 additions & 41 deletions __main__.py
@@ -49,25 +49,39 @@ def main():
# Create namespaces
namespace_objects = create_namespaces(namespaces, k8s_provider)

# Deploy Cilium
l2_bridge_name = "br0"
l2announcements = "192.168.1.70/28"
cilium_helm_release = deploy_cilium(
"cilium-release",
# # Deploy Cilium
# l2_bridge_name = "br0"
# l2announcements = "192.168.1.70/28"
# cilium_helm_release = deploy_cilium(
# "cilium-release",
# k8s_provider,
# kubernetes_distribution,
# "kargo",
# kubernetes_endpoint_ip.ips,
# "kube-system",
# l2_bridge_name,
# l2announcements
# )

# Enable cert_manager with the following command:
# ~$ pulumi config set cert_manager.enabled true
# Deploy Cert Manager
cert_manager = deploy_cert_manager(
"kargo",
k8s_provider,
kubernetes_distribution,
"kargo",
kubernetes_endpoint_ip.ips,
"kube-system",
l2_bridge_name,
l2announcements
"cert-manager"
)
#pulumi.export('cert_manager', cert_manager)

# Deploy KubeVirt
kubevirt_version = deploy_kubevirt(
k8s_provider,
kubernetes_distribution
kubernetes_distribution,
cert_manager
)

# Deploy CDI
containerized_data_importer = deploy_cdi(
k8s_provider
@@ -97,6 +111,21 @@ def main():
)
#pulumi.export('local_path_provisioner', local_path_provisioner)

# check if pulumi config ceph.enabled is set to true and deploy rook-ceph if it is
# Enable ceph operator with the following command:
# ~$ pulumi config set ceph.enabled true
deploy_ceph = config.get_bool('ceph.enabled') or False
if deploy_ceph:
# Deploy Rook Ceph
rook_operator = deploy_rook_operator(
"kargo",
k8s_provider,
kubernetes_distribution,
"kargo",
"rook-ceph"
)
pulumi.export('rook_operator', rook_operator)

# check if hostpath-provisioner pulumi config hostpath_provisioner.enabled is set to true and deploy if it is
# Enable hostpath-provisioner with the following command:
# ~$ pulumi config set hostpath_provisioner.enabled true
@@ -116,38 +145,10 @@ def main():
k8s_provider,
hostpath_default_path,
hostpath_default_storage_class,
hostpath_version
)
pulumi.export('hostpath_provisioner', hostpath_provisioner["namespace"])

# check if pulumi config ceph.enabled is set to true and deploy rook-ceph if it is
# Enable ceph operator with the following command:
# ~$ pulumi config set ceph.enabled true
deploy_ceph = config.get_bool('ceph.enabled') or False
if deploy_ceph:
# Deploy Rook Ceph
rook_operator = deploy_rook_operator(
"kargo",
k8s_provider,
kubernetes_distribution,
"kargo",
"rook-ceph"
)
pulumi.export('rook_operator', rook_operator)

# Enable cert_manager with the following command:
# ~$ pulumi config set cert_manager.enabled true
cert_manager_enabled = config.get_bool('cert_manager.enabled') or False
if cert_manager_enabled:
# Deploy Cert Manager
cert_manager = deploy_cert_manager(
"kargo",
k8s_provider,
kubernetes_distribution,
"kargo",
"cert-manager"
hostpath_version,
cert_manager
)
pulumi.export('cert_manager', cert_manager)
#pulumi.export('hostpath_provisioner', hostpath_provisioner["version"])

# check if pulumi config openunison.enabled is set to true and deploy openunison if it is
openunison_enabled = config.get_bool('openunison.enabled') or False
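Net effect of the __main__.py hunks: the Cilium deploy is commented out, cert-manager moves out of its config-gated block and is deployed unconditionally, its handle is threaded into deploy_kubevirt and deploy_hostpath_provisioner as an explicit dependency, and the rook-ceph block is hoisted ahead of the hostpath-provisioner block. The remaining optional components stay gated by Pulumi config; per the comments in this file, they toggle like so (a sketch using the config keys named above):

pulumi config set ceph.enabled true
pulumi config set hostpath_provisioner.enabled true
pulumi config set openunison.enabled true
pulumi up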
71 changes: 41 additions & 30 deletions hack/pod-br0.yaml
@@ -1,43 +1,54 @@
#---
## This net-attach-def defines macvlan-conf with
## + ips capabilities to specify ip in pod annotation and
## + mac capabilities to specify mac address in pod annotation
## default gateway is defined as well
#apiVersion: "k8s.cni.cncf.io/v1"
#kind: NetworkAttachmentDefinition
#metadata:
# name: pod-br0
#spec:
# config: '{
# "cniVersion": "0.3.1",
# "name": "br0",
# "plugins": [
# {
# "type": "bridge",
# "bridge": "br0",
# "mode": "bridge",
# "ipam": {}
# },{
# "type": "tuning"
# }
# ]
# }'
---
# This net-attach-def defines the br0 bridge conf with
# + ips capabilities to specify ip in pod annotation and
# + mac capabilities to specify mac address in pod annotation
# (ipam is left empty here, so no default gateway is configured)
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: br0
spec:
config: '{
"cniVersion": "0.3.1",
"name": "br0",
"plugins": [
{
"type": "bridge",
"bridge": "br0",
"mode": "bridge",
"ipam": {}
},{
"type": "tuning"
}
]
}'
---
# Define a pod using the br0 attachment defined above, with ip address and mac;
# "gateway" in the annotation overrides the default gateway to use the attachment's.
# Without "gateway" in k8s.v1.cni.cncf.io/networks, the default route stays on the
# cluster network interface, eth0, even though the attachment config may define a default gateway.
#apiVersion: v1
#kind: Pod
#metadata:
# name: samplepod
# annotations:
# k8s.v1.cni.cncf.io/networks: '[{"name": "br0"}]'
#spec:
# containers:
# - name: samplepod
# command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
# image: dougbtv/centos-network
# ports:
# - containerPort: 80
# automountServiceAccountToken: false
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: '[{"name": "br0"}]'
k8s.v1.cni.cncf.io/networks: br0
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
ports:
- containerPort: 80
automountServiceAccountToken: false
command: ["/bin/ash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: alpine
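
To exercise the manifest this commit is debugging, one way to apply it and inspect the attachment (a sketch, assuming Multus and its NetworkAttachmentDefinition CRD are already installed):

kubectl apply -f hack/pod-br0.yaml
kubectl get network-attachment-definitions br0 -o yaml
kubectl get pod samplepod -o jsonpath='{.metadata.annotations.k8s\.v1\.cni\.cncf\.io/network-status}'
kubectl exec samplepod -- ip addr show

A secondary interface (net1 by default) should appear in the pod if the br0 attachment succeeded; if the network-status annotation is missing or the pod sticks in ContainerCreating, the Multus logs on the node are the next place to look.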
22 changes: 11 additions & 11 deletions metal/3node-optiplex-cluster/41.controlplane.yaml
@@ -18,9 +18,9 @@ machine:
- 192.168.1.41
- 192.168.1.42
- 192.168.1.43
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
kubelet:
image: ghcr.io/siderolabs/kubelet:v1.29.0
image: ghcr.io/siderolabs/kubelet:v1.29.3
defaultRuntimeSeccompProfileEnabled: true
disableManifestsDirectory: true
clusterDNS:
@@ -34,7 +34,7 @@ machine:
network:
kubespan:
enabled: false
hostname: cp1.kube.optiplexprime.kargo.dev
hostname: cp1.optiplexprime.kargo.dev
nameservers:
- 192.168.1.1
- 8.8.8.8
@@ -43,19 +43,19 @@
- ip: 192.168.1.40
aliases:
- api
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
- ip: 192.168.1.41
aliases:
- cp1
- cp1.kube.optiplexprime.kargo.dev
- cp1.optiplexprime.kargo.dev
- ip: 192.168.1.42
aliases:
- cp2
- cp2.kube.optiplexprime.kargo.dev
- cp2.optiplexprime.kargo.dev
- ip: 192.168.1.43
aliases:
- cp3
- cp3.kube.optiplexprime.kargo.dev
- cp3.optiplexprime.kargo.dev
interfaces:
- interface: br0
mtu: 1500
@@ -140,13 +140,13 @@ cluster:
serviceAccount:
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSURqc2tBeS9WeFJuRTFTUGxIblJoRUcvdnNjMFd1WjUxdVBGNUJvbmpDVENvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFeFRCdEFoT3hMMnJnaVFPdEYrVTRoR05QMkV6bWZScXBPT01iRTJhSjRWNkMva1JlbHNtKwpaOTZ2d3RzODBWWnhSM0tZajVtL3BxV0gra3FvcUU2SWR3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
apiServer:
image: registry.k8s.io/kube-apiserver:v1.29.0
image: registry.k8s.io/kube-apiserver:v1.29.3
certSANs:
- 192.168.1.40
- 192.168.1.41
- 192.168.1.42
- 192.168.1.43
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
disablePodSecurityPolicy: true
admissionControl: []
#- name: PodSecurity
@@ -171,9 +171,9 @@ cluster:
rules:
- level: Metadata
controllerManager:
image: registry.k8s.io/kube-controller-manager:v1.29.0
image: registry.k8s.io/kube-controller-manager:v1.29.3
scheduler:
image: registry.k8s.io/kube-scheduler:v1.29.0
image: registry.k8s.io/kube-scheduler:v1.29.3
discovery:
enabled: true
registries:
22 changes: 11 additions & 11 deletions metal/3node-optiplex-cluster/42.controlplane.yaml
@@ -18,9 +18,9 @@ machine:
- 192.168.1.41
- 192.168.1.42
- 192.168.1.43
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
kubelet:
image: ghcr.io/siderolabs/kubelet:v1.29.0
image: ghcr.io/siderolabs/kubelet:v1.29.3
defaultRuntimeSeccompProfileEnabled: true
disableManifestsDirectory: true
clusterDNS:
@@ -34,7 +34,7 @@ machine:
network:
kubespan:
enabled: false
hostname: cp2.kube.optiplexprime.kargo.dev
hostname: cp2.optiplexprime.kargo.dev
nameservers:
- 192.168.1.1
- 8.8.8.8
@@ -43,19 +43,19 @@
- ip: 192.168.1.40
aliases:
- api
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
- ip: 192.168.1.41
aliases:
- cp1
- cp1.kube.optiplexprime.kargo.dev
- cp1.optiplexprime.kargo.dev
- ip: 192.168.1.42
aliases:
- cp2
- cp2.kube.optiplexprime.kargo.dev
- cp2.optiplexprime.kargo.dev
- ip: 192.168.1.43
aliases:
- cp3
- cp3.kube.optiplexprime.kargo.dev
- cp3.optiplexprime.kargo.dev
interfaces:
- interface: br0
mtu: 1500
@@ -140,13 +140,13 @@ cluster:
serviceAccount:
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSURqc2tBeS9WeFJuRTFTUGxIblJoRUcvdnNjMFd1WjUxdVBGNUJvbmpDVENvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFeFRCdEFoT3hMMnJnaVFPdEYrVTRoR05QMkV6bWZScXBPT01iRTJhSjRWNkMva1JlbHNtKwpaOTZ2d3RzODBWWnhSM0tZajVtL3BxV0gra3FvcUU2SWR3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
apiServer:
image: registry.k8s.io/kube-apiserver:v1.29.0
image: registry.k8s.io/kube-apiserver:v1.29.3
certSANs:
- 192.168.1.40
- 192.168.1.41
- 192.168.1.42
- 192.168.1.43
- api.kube.optiplexprime.kargo.dev
- api.optiplexprime.kargo.dev
disablePodSecurityPolicy: true
admissionControl: []
#- name: PodSecurity
@@ -171,9 +171,9 @@ cluster:
rules:
- level: Metadata
controllerManager:
image: registry.k8s.io/kube-controller-manager:v1.29.0
image: registry.k8s.io/kube-controller-manager:v1.29.3
scheduler:
image: registry.k8s.io/kube-scheduler:v1.29.0
image: registry.k8s.io/kube-scheduler:v1.29.3
discovery:
enabled: true
registries:
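Both controlplane configs above carry the same two changes: the kubelet, kube-apiserver, kube-controller-manager, and kube-scheduler images are bumped from v1.29.0 to v1.29.3, and the kube. segment is dropped from hostnames and certificate SANs (api.kube.optiplexprime.kargo.dev becomes api.optiplexprime.kargo.dev, and so on). A sketch of rolling the updated files onto their nodes, assuming talosctl is already configured against this cluster, with node IPs taken from the configs:

talosctl apply-config --nodes 192.168.1.41 --file metal/3node-optiplex-cluster/41.controlplane.yaml
talosctl apply-config --nodes 192.168.1.42 --file metal/3node-optiplex-cluster/42.controlplane.yaml

Alternatively, talosctl upgrade-k8s --to 1.29.3 can drive the Kubernetes component bump cluster-wide in one step, though it mutates the machine config itself rather than consuming these files.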
