From 825f0b08ae9c39b89cc61f74ab57ee840046ec97 Mon Sep 17 00:00:00 2001
From: Derek Nola
Date: Mon, 5 Feb 2024 12:42:54 -0800
Subject: [PATCH] Fix internal killall, use start E2E, not a new test

Signed-off-by: Derek Nola
---
 cmd/k3s/main.go                   |   2 +-
 contrib/util/k3s-killall.sh       |  19 +++--
 pkg/cli/cmds/killall.go           |   4 +-
 scripts/package-cli               |   2 +-
 tests/e2e/killall/Vagrantfile     |  82 ---------------------
 tests/e2e/killall/killall_test.go | 115 ------------------------
 tests/e2e/startup/startup_test.go |  63 ++++++++++++++--
 7 files changed, 69 insertions(+), 218 deletions(-)
 mode change 100644 => 100755 contrib/util/k3s-killall.sh
 delete mode 100644 tests/e2e/killall/Vagrantfile
 delete mode 100644 tests/e2e/killall/killall_test.go

diff --git a/cmd/k3s/main.go b/cmd/k3s/main.go
index 922534578448..5773038feb0c 100644
--- a/cmd/k3s/main.go
+++ b/cmd/k3s/main.go
@@ -52,7 +52,7 @@ func main() {
 		cmds.NewCRICTL(externalCLIAction("crictl", dataDir)),
 		cmds.NewCtrCommand(externalCLIAction("ctr", dataDir)),
 		cmds.NewCheckConfigCommand(externalCLIAction("check-config", dataDir)),
-		cmds.NewKillAllCommand(externalCLIAction("test", dataDir)),
+		cmds.NewKillAllCommand(externalCLIAction("k3s-killall", dataDir)),
 		cmds.NewTokenCommands(
 			tokenCommand,
 			tokenCommand,
diff --git a/contrib/util/k3s-killall.sh b/contrib/util/k3s-killall.sh
old mode 100644
new mode 100755
index 314c7d31f601..5df8b754b710
--- a/contrib/util/k3s-killall.sh
+++ b/contrib/util/k3s-killall.sh
@@ -1,11 +1,11 @@
-#!/bin/sh
+#!/usr/bin/env sh
 
 for bin in /var/lib/rancher/k3s/data/**/bin/; do
     [ -d "$bin" ] && export PATH=$PATH:$bin:$bin/aux
 done
 
 
-
+set -x
 
 for service in /etc/systemd/system/k3s*.service; do
     [ -s "$service" ] && systemctl stop "$(basename "$service")"
@@ -25,21 +25,22 @@ pschildren() {
 
 pstree() {
     for pid in "$@"; do
-        if [ -n "$$" ]; then
-            echo "$pid"
+        # Don't return the current process
+        if [ "$pid" != "$$" ]; then
+            echo "$pid"
+        fi
         for child in $(pschildren "$pid"); do
             pstree "$child"
         done
-        fi
     done
 }
 
 killtree() {
-    kill -9 "$(
+    kill -9 $(
         { set +x; } 2>/dev/null;
         pstree "$@";
         set -x;
-    )" 2>/dev/null
+    ) 2>/dev/null
 }
 
 remove_interfaces() {
@@ -67,13 +68,11 @@ getshims() {
     ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
 }
 
-
-
 do_unmount_and_remove() {
     set +x
     while read -r _ path _; do
         case "$path" in $1*) echo "$path" ;; esac
-    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
+    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"'
     set -x
 }
 
diff --git a/pkg/cli/cmds/killall.go b/pkg/cli/cmds/killall.go
index bda5539885c1..dfe718a01ff2 100644
--- a/pkg/cli/cmds/killall.go
+++ b/pkg/cli/cmds/killall.go
@@ -4,8 +4,8 @@ import "github.com/urfave/cli"
 
 func NewKillAllCommand(action func(*cli.Context) error) cli.Command {
 	return cli.Command{
-		Name:            "k3s-killall",
-		Usage:           "Run k3s-killall.sh script",
+		Name:            "killall",
+		Usage:           "Kill all K3s and associated child processes",
 		SkipFlagParsing: true,
 		SkipArgReorder:  true,
 		Action:          action,
diff --git a/scripts/package-cli b/scripts/package-cli
index c5b63ae16e56..7a2dc9b93882 100755
--- a/scripts/package-cli
+++ b/scripts/package-cli
@@ -18,7 +18,7 @@ for i in bandwidth bridge firewall flannel host-local loopback portmap; do
 done
 
 cp contrib/util/check-config.sh bin/check-config
-cp contrib/util/k3s-killall.sh bin/test
+cp contrib/util/k3s-killall.sh bin/k3s-killall
 
 rm -rf build/data
 mkdir -p build/data build/out
diff --git a/tests/e2e/killall/Vagrantfile b/tests/e2e/killall/Vagrantfile
deleted file mode 100644
index 80e3cb13a53e..000000000000
--- a/tests/e2e/killall/Vagrantfile
+++ /dev/null
@@ -1,82 +0,0 @@
-ENV['VAGRANT_NO_PARALLEL'] = 'no'
-NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
-  ["server-0", "agent-0", "agent-1" ])
-NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
-  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
-GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
-RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
-GOCOVER = (ENV['E2E_GOCOVER'] || "")
-NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
-NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
-# This key must be created using tailscale web
-NETWORK_PREFIX = "10.10.10"
-install_type = ""
-
-def provision(vm, roles, role_num, node_num)
-  vm.box = NODE_BOXES[node_num]
-  vm.hostname = "#{roles[0]}-#{role_num}"
-  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
-  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"
-
-  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
-  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
-  load vagrant_defaults
-
-  defaultOSConfigure(vm)
-  addCoverageDir(vm, roles, GOCOVER)
-  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
-
-  vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
-
-  if roles.include?("server") && role_num == 0
-    vm.provision :k3s, run: 'once' do |k3s|
-      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
-      k3s.args = "server "
-      k3s.config = <<~YAML
-        cluster-init: true
-        token: vagrant
-        node-external-ip: #{node_ip}
-        flannel-iface: eth1
-      YAML
-      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
-    end
-  end
-  if roles.include?("agent")
-    vm.provision :k3s, run: 'once' do |k3s|
-      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
-      k3s.args = "agent "
-      k3s.config = <<~YAML
-        server: "https://#{NETWORK_PREFIX}.100:6443"
-        node-external-ip: #{node_ip}
-        flannel-iface: eth1
-      YAML
-      k3s.env = ["K3S_KUBECONFIG_MODE=0644", "INSTALL_K3S_SKIP_START=true", install_type]
-    end
-  end
-end
-
-Vagrant.configure("2") do |config|
-  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt", "vagrant-scp"]
-  config.vm.provider "libvirt" do |v|
-    v.cpus = NODE_CPUS
-    v.memory = NODE_MEMORY
-  end
-
-  if NODE_ROLES.kind_of?(String)
-    NODE_ROLES = NODE_ROLES.split(" ", -1)
-  end
-  if NODE_BOXES.kind_of?(String)
-    NODE_BOXES = NODE_BOXES.split(" ", -1)
-  end
-
-  # Must iterate on the index, vagrant does not understand iterating
-  # over the node roles themselves
-  NODE_ROLES.length.times do |i|
-    name = NODE_ROLES[i]
-    config.vm.define name do |node|
-      roles = name.split("-", -1)
-      role_num = roles.pop.to_i
-      provision(node.vm, roles, role_num, i)
-    end
-  end
-end
diff --git a/tests/e2e/killall/killall_test.go b/tests/e2e/killall/killall_test.go
deleted file mode 100644
index 5a2a67a4dd32..000000000000
--- a/tests/e2e/killall/killall_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package killall
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"strings"
-	"testing"
-
-	"github.com/k3s-io/k3s/tests/e2e"
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-)
-
-// Valid nodeOS:
-// generic/ubuntu2004, generic/centos7, generic/rocky8,
-// opensuse/Leap-15.3.x86_64
-var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
-var ci = flag.Bool("ci", false, "running on CI")
-var local = flag.Bool("local", false, "deploy a locally built K3s binary")
-
-// Environment Variables Info:
-// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
-// E2E_REGISTRY: true/false (default: false)
-
-func Test_E2EKillAll(t *testing.T) {
-	RegisterFailHandler(Fail)
-	flag.Parse()
-	suiteConfig, reporterConfig := GinkgoConfiguration()
-	RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
-}
-
-var (
-	kubeConfigFile  string
-	serverNodeNames []string
-	agentNodeNames  []string
-)
-
-var _ = ReportAfterEach(e2e.GenReport)
-
-var _ = Describe("Verify Create", Ordered, func() {
-	Context("Cluster :", func() {
-		It("Starts up with no issues", func() {
-			var err error
-			if *local {
-				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
-			} else {
-				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 0)
-			}
-			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-			fmt.Println("CLUSTER CONFIG")
-			fmt.Println("OS:", *nodeOS)
-			fmt.Println("Server Nodes:", serverNodeNames)
-			fmt.Println("Agent Nodes:", agentNodeNames)
-			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
-			Expect(err).NotTo(HaveOccurred())
-		})
-		It("Checks Node and Pod Status", func() {
-			fmt.Printf("\nFetching node status\n")
-			Eventually(func(g Gomega) {
-				nodes, err := e2e.ParseNodes(kubeConfigFile, false)
-				g.Expect(err).NotTo(HaveOccurred())
-				for _, node := range nodes {
-					g.Expect(node.Status).Should(Equal("Ready"))
-				}
-			}, "620s", "5s").Should(Succeed())
-			_, _ = e2e.ParseNodes(kubeConfigFile, true)
-
-			fmt.Printf("\nFetching Pods status\n")
-			Eventually(func(g Gomega) {
-				pods, err := e2e.ParsePods(kubeConfigFile, false)
-				g.Expect(err).NotTo(HaveOccurred())
-				for _, pod := range pods {
-					if strings.Contains(pod.Name, "helm-install") {
-						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
-					} else {
-						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
-					}
-				}
-			}, "620s", "5s").Should(Succeed())
-			_, _ = e2e.ParsePods(kubeConfigFile, true)
-		})
-		It("Tries to kill k3s processes", func() {
-			cmd := "k3s k3s-killall"
-			out, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
-			if err != nil {
-				fmt.Println(err.Error())
-				Expect(err).ToNot(HaveOccurred())
-			}
-			fmt.Println(out)
-			process, err := e2e.RunCmdOnNode("ps aux | grep k3s", serverNodeNames[0])
-			if err != nil {
-				fmt.Println(err.Error())
-				Expect(err).ToNot(HaveOccurred())
-			}
-			fmt.Println(process)
-		})
-	})
-
-})
-var failed bool
-var _ = AfterEach(func() {
-	failed = failed || CurrentSpecReport().Failed()
-})
-
-var _ = AfterSuite(func() {
-
-	if failed && !*ci {
-		fmt.Println("FAILED!")
-	} else {
-		Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
-		Expect(e2e.DestroyCluster()).To(Succeed())
-		Expect(os.Remove(kubeConfigFile)).To(Succeed())
-	}
-})
diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go
index 88dcfadd8ec6..9dabdc86aec2 100644
--- a/tests/e2e/startup/startup_test.go
+++ b/tests/e2e/startup/startup_test.go
@@ -133,8 +133,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 			_, _ = e2e.ParsePods(kubeConfigFile, true)
 		})
 		It("Kills the cluster", func() {
-			err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
-			Expect(err).NotTo(HaveOccurred())
+			Expect(KillK3sCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
 		})
 	})
 	Context("Verify prefer-bundled-bin flag", func() {
@@ -177,8 +176,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 			_, _ = e2e.ParsePods(kubeConfigFile, true)
 		})
 		It("Kills the cluster", func() {
-			err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
-			Expect(err).NotTo(HaveOccurred())
+			Expect(KillK3sCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
 		})
 	})
 	Context("Verify disable-agent and egress-selector-mode flags", func() {
@@ -248,8 +246,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 		})
 
 		It("Kills the cluster", func() {
-			err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
-			Expect(err).NotTo(HaveOccurred())
+			Expect(KillK3sCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
 		})
 	})
 	Context("Verify server fails to start with bootstrap token", func() {
@@ -265,9 +262,61 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
 		})
 
 		It("Kills the cluster", func() {
-			err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
+			Expect(KillK3sCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
+		})
+	})
+	Context("Verify k3s killall subcommand works on all nodes", func() {
+		It("Starts K3s with no issues", func() {
+			err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), "", "")
+			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+
+			fmt.Println("CLUSTER CONFIG")
+			fmt.Println("OS:", *nodeOS)
+			fmt.Println("Server Nodes:", serverNodeNames)
+			fmt.Println("Agent Nodes:", agentNodeNames)
+			kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
 			Expect(err).NotTo(HaveOccurred())
 		})
+
+		It("Checks node and pod status", func() {
+			fmt.Printf("\nFetching node status\n")
+			Eventually(func(g Gomega) {
+				nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+				g.Expect(err).NotTo(HaveOccurred())
+				for _, node := range nodes {
+					g.Expect(node.Status).Should(Equal("Ready"))
+				}
+			}, "360s", "5s").Should(Succeed())
+			_, _ = e2e.ParseNodes(kubeConfigFile, true)
+
+			fmt.Printf("\nFetching pods status\n")
+			Eventually(func(g Gomega) {
+				pods, err := e2e.ParsePods(kubeConfigFile, false)
+				g.Expect(err).NotTo(HaveOccurred())
+				for _, pod := range pods {
+					if strings.Contains(pod.Name, "helm-install") {
+						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+					} else {
+						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+					}
+				}
+			}, "360s", "5s").Should(Succeed())
+			_, _ = e2e.ParsePods(kubeConfigFile, true)
+		})
+
+		It("Kills the cluster", func() {
+			for _, node := range append(serverNodeNames, agentNodeNames...) {
+				_, err := e2e.RunCmdOnNode("k3s killall", node)
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+		It("Checks that no k3s processes are running", func() {
+			Eventually(func(g Gomega) {
+				for _, node := range append(serverNodeNames, agentNodeNames...) {
+					g.Expect(e2e.RunCmdOnNode("ps x | grep k3s || true", node)).To(BeEmpty())
+				}
+			}, "20s", "5s").Should(Succeed())
+		})
 	})
 })