Add e2e test for embedded registry mirror
Signed-off-by: Brad Davidson <[email protected]>
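For context, this is a condensed sketch of the configuration the test exercises, taken from the Vagrantfile below: every node writes a /etc/rancher/k3s/registries.yaml that lists docker.io as a mirrored registry, servers start with embedded-registry: true to enable the embedded mirror, and agents additionally set disable-default-registry-endpoint: true so images can only be pulled through the mirror.

# /etc/rancher/k3s/registries.yaml (written on every node)
mirrors:
  docker.io:

# k3s config.yaml (servers)
embedded-registry: true

# k3s config.yaml (agents only)
disable-default-registry-endpoint: true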
Showing 2 changed files with 259 additions and 0 deletions.
@@ -0,0 +1,124 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  addCoverageDir(vm, role, GOCOVER)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "shell", inline: "ping -c 2 k3s.io"

  # The formatting on this is a little weird, but it allows inserting variables
  # and still using the heredoc formatting with escaped quotes
  writePrivateRegistry = <<~'SCRIPT'.chomp % {net: NETWORK_PREFIX}
    mkdir -p /etc/rancher/k3s/
    echo "mirrors:
      docker.io:" > /etc/rancher/k3s/registries.yaml
  SCRIPT

  if role.include?("server") && role_num == 0
    vm.provision "private-registry", type: "shell", inline: writePrivateRegistry
    dockerInstall(vm)

    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        token: vagrant
        node-external-ip: #{NETWORK_PREFIX}.100
        flannel-iface: eth1
        cluster-init: true
        embedded-registry: true
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end

  elsif role.include?("server") && role_num != 0
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
        embedded-registry: true
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  if role.include?("agent")
    vm.provision "shell", inline: writePrivateRegistry

    vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "agent"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:6443"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
        disable-default-registry-endpoint: true
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end
@@ -0,0 +1,135 @@
package embeddedmirror

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8,
// opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)

func Test_E2EPrivateRegistry(t *testing.T) {
	RegisterFailHandler(Fail)
	flag.Parse()
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Create", Ordered, func() {
	Context("Cluster :", func() {
		It("Starts up with no issues", func() {
			var err error
			if *local {
				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
			} else {
				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
			}
			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
			fmt.Println("CLUSTER CONFIG")
			fmt.Println("OS:", *nodeOS)
			fmt.Println("Server Nodes:", serverNodeNames)
			fmt.Println("Agent Nodes:", agentNodeNames)
			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
			Expect(err).NotTo(HaveOccurred())
		})
		It("Checks Node and Pod Status", func() {
			fmt.Printf("\nFetching node status\n")
			Eventually(func(g Gomega) {
				nodes, err := e2e.ParseNodes(kubeConfigFile, false)
				g.Expect(err).NotTo(HaveOccurred())
				for _, node := range nodes {
					g.Expect(node.Status).Should(Equal("Ready"))
				}
			}, "620s", "5s").Should(Succeed())
			_, _ = e2e.ParseNodes(kubeConfigFile, true)

			fmt.Printf("\nFetching Pods status\n")
			Eventually(func(g Gomega) {
				pods, err := e2e.ParsePods(kubeConfigFile, false)
				g.Expect(err).NotTo(HaveOccurred())
				for _, pod := range pods {
					if strings.Contains(pod.Name, "helm-install") {
						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
					} else {
						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
					}
				}
			}, "620s", "5s").Should(Succeed())
			_, _ = e2e.ParsePods(kubeConfigFile, true)
		})
It("Should create and validate deployment with embedded registry mirror using image tag", func() { | ||
res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage-1 --image=docker.io/library/nginx:1.25.3", serverNodeNames[0]) | ||
fmt.Println(res) | ||
Expect(err).NotTo(HaveOccurred()) | ||
|
||
patchCmd := fmt.Sprintf(`kubectl patch deployment my-webpage-1 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-webpage-1"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount) | ||
res, err := e2e.RunCmdOnNode(patchCmd, serverNodeNames[0]) | ||
fmt.Println(res) | ||
Expect(err).NotTo(HaveOccurred()) | ||
|
||
res, err = e2e.RunCmdOnNode("kubectl rollout status deployment my-webpage-1 --watch=true --timeout=360s", serverNodeNames[0]) | ||
fmt.Println(res) | ||
expect(err).NotTo(HaveOccurred()) | ||
}) | ||
|
||
It("Should create and validate deployment with embedded registry mirror using image digest", func() { | ||
res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage-2 --image=docker.io/library/nginx:sha256:9784f7985f6fba493ba30fb68419f50484fee8faaf677216cb95826f8491d2e9", serverNodeNames[0]) | ||
fmt.Println(res) | ||
Expect(err).NotTo(HaveOccurred()) | ||
|
||
patchCmd := fmt.Sprintf(`kubectl patch deployment my-webpage-2 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-webpage-2"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount) | ||
res, err := e2e.RunCmdOnNode(patchCmd, serverNodeNames[0]) | ||
fmt.Println(res) | ||
Expect(err).NotTo(HaveOccurred()) | ||
|
||
res, err = e2e.RunCmdOnNode("kubectl rollout status deployment my-webpage-2 --watch=true --timeout=360s", serverNodeNames[0]) | ||
fmt.Println(res) | ||
expect(err).NotTo(HaveOccurred()) | ||
}) | ||
|
||
}) | ||
}) | ||

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		r2, err := e2e.RunCmdOnNode("kubectl delete deployment my-webpage-1 my-webpage-2", serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), r2)
		Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
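For readability, the single-line JSON patch used in the test expands to the YAML below (shown for my-webpage-1; the my-webpage-2 patch differs only in the label value, and the replica count shown assumes the default flag values). The required pod anti-affinity on kubernetes.io/hostname, combined with a replica count equal to the total node count, forces one pod onto every node so that each node has to pull the image through the mirror.

spec:
  replicas: 2                # *serverCount + *agentCount with the default flag values
  revisionHistoryLimit: 0
  strategy:
    type: Recreate
    rollingUpdate: null
  template:
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["my-webpage-1"]
              topologyKey: kubernetes.io/hostname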