host_cleanup.sh (forked from Nordix/metal3-dev-env)

#!/usr/bin/env bash
set -x
# shellcheck disable=SC1091
source lib/logging.sh
# shellcheck disable=SC1091
source lib/common.sh
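
# NOTE: lib/logging.sh and lib/common.sh are assumed to provide the helpers and
# environment defaults consumed below (e.g. remove_ironic_containers,
# CONTAINER_RUNTIME, WORKING_DIR, NUM_NODES); this script only uses them.
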
# Kill and remove the running ironic containers
remove_ironic_containers
# Remove existing pod
if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
for pod in ironic-pod infra-pod; do
if sudo "${CONTAINER_RUNTIME}" pod exists "${pod}"; then
sudo "${CONTAINER_RUNTIME}" pod rm "${pod}" -f
fi
done
fi
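
# Optional sanity check that nothing is left behind (assuming podman is the runtime):
#   sudo "${CONTAINER_RUNTIME}" pod ps
#   sudo "${CONTAINER_RUNTIME}" ps -a
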
# Kill the locally running operators
if [ "${BMO_RUN_LOCAL}" = true ]; then
kill "$(pgrep "run-bmo-loop.sh")" 2> /dev/null || true
kill "$(pgrep "operator-sdk")" 2> /dev/null || true
fi
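
# "go run" builds the manager and executes it as a child process, so killing only the
# pgrep match for "go run" would leave the manager itself running; the child PID is
# therefore looked up with pgrep -P and killed instead.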
if [ "${CAPM3_RUN_LOCAL}" = true ]; then
CAPM3_PARENT_PID="$(pgrep -f "go run ./cmd/manager/main.go")"
if [[ "${CAPM3_PARENT_PID}" != "" ]]; then
CAPM3_GO_PID="$(pgrep -P "${CAPM3_PARENT_PID}" )"
kill "${CAPM3_GO_PID}" 2> /dev/null || true
fi
fi
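
# Tear down the VMs, networks and storage created by vm-setup; the -e variables are
# expected to match the values used during setup so the same resources are removed.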
ANSIBLE_FORCE_COLOR=true ansible-playbook \
  -e "working_dir=$WORKING_DIR" \
  -e "num_nodes=$NUM_NODES" \
  -e "extradisks=$VM_EXTRADISKS" \
  -e "virthost=$HOSTNAME" \
  -e "manage_external=$MANAGE_EXT_BRIDGE" \
  -e "nodes_file=$NODES_FILE" \
  -i vm-setup/inventory.ini \
  -b -v vm-setup/teardown-playbook.yml
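
# Remove the firewall rules added during setup (firewall_rule_state=absent).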
ANSIBLE_FORCE_COLOR=true ansible-playbook \
  -e "use_firewalld=${USE_FIREWALLD}" \
  -e "firewall_rule_state=absent" \
  -i vm-setup/inventory.ini \
  -b -v vm-setup/firewall.yml

# There was a bug in this file; it may need to be recreated.
if [[ $OS == "centos" || $OS == "rhel" ]]; then
  sudo rm -rf /etc/NetworkManager/conf.d/dnsmasq.conf
  if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
    sudo nmcli con delete provisioning
  fi
  # The external network should have been cleaned up already at this stage, but we
  # double-check, as leaving it around causes issues when the host is rebooted.
  if [ "${MANAGE_EXT_BRIDGE}" == "y" ]; then
    sudo nmcli con delete external || true
  fi
fi

# Clean up any serial logs
sudo rm -rf /var/log/libvirt/qemu/*serial0.log*
# Clean up BMH CRs
sudo rm -rf "${WORKING_DIR}"/bmhosts_crs.yaml
sudo rm -rf "${WORKING_DIR}"/bmhs

# TODO(Sunnatillo): Remove the first line below after deprecation of CAPM3 v1.4
rm -rf "${HOME}"/.cluster-api
if [[ -n "${XDG_CONFIG_HOME}" ]]; then
rm -rf "${XDG_CONFIG_HOME}"/cluster-api
else
rm -rf "${HOME}"/.config/cluster-api
fi
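
# Typical invocation (assumption: run from the metal3-dev-env checkout root so that
# the relative lib/ and vm-setup/ paths resolve):
#   ./host_cleanup.sh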