useful_scripts/fix_cloud.sh
Arif Ali 4ee03a9887
Many Updates
* Combine bionic and focal fix_cloud script
* Add latest vault keys for my lab
* Add new shutdown script for my lab envs
2022-07-01 20:28:45 +01:00

228 lines
7.3 KiB
Bash
Executable File

#!/bin/bash
# Used for debugging
# set -ax
# This script is required after a reboot of the cloud after the cloud has been
# shut down
check_unit_status()
{
# Poll live "juju status" and print 1 when at least $required units of the
# given application report exactly the given workload-status message,
# otherwise print 0. Used as a polling predicate in "until" loops below.
#   $1 - application name
#   $2 - workload-status message to match exactly
#   $3 - (optional) number of matching units required; defaults to 3,
#        preserving the original hard-coded cluster size
local app_name=$1
local status_check="$2"
local required=${3:-3}
local unit_status app_units unit this_unit_status
local num=0
unit_status=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
app_units=$(echo "${unit_status}" | jq -r .sub)
for unit in ${app_units} ; do
    # Pick this unit's line out of the per-unit status stream.
    this_unit_status=$(echo "$unit_status" | jq -rc . | grep "${unit}" | jq -r .status)
    if [[ "${this_unit_status}" == "${status_check}" ]] ; then
        (( num++ ))
    fi
done
if [[ ${num} -ge ${required} ]] ; then echo 1
else echo 0
fi
}
get_lead()
{
# Print the leader unit name for the given application, read from the
# cached juju status JSON in the global ${juju_status} file.
#   $1 - application name
local app_name=$1
# NB: .value.leader is a JSON boolean, so compare against the literal
# `true`. The original wrote == "true" with unescaped inner quotes, which
# the shell stripped, so jq accidentally received the correct `== true`;
# this makes that intent explicit.
jq -rc ".applications.${app_name}.units | to_entries[] | select(.value.leader == true) | .key" "${juju_status}"
}
do_vault()
{
# Unseal every vault unit after a cold start, then refresh vault's status.
# Reads the three unseal keys from the 4th column of the first three lines
# of ./vault-secrets.txt, and unit addresses from the cached juju status
# JSON in ${juju_status}. Echoes each command before running it so the
# operator can replay the steps by hand if needed.
local vip unit_addr unseal_key vault_addrs
vip=$(juju config vault vip)
echo export VAULT_ADDR="http://${vip}:8200"
export VAULT_ADDR="http://${vip}:8200"
echo " "
vault_addrs=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status}")
for unit_addr in ${vault_addrs}
do
    # Point the vault CLI at this specific unit and feed it each key.
    echo export VAULT_ADDR=http://"${unit_addr}":8200
    export VAULT_ADDR=http://"${unit_addr}":8200
    for unseal_key in $(head -n3 vault-secrets.txt | awk '{print $4}')
    do
        echo vault operator unseal -tls-skip-verify "$unseal_key"
        vault operator unseal -tls-skip-verify "$unseal_key"
    done
done
# Let the charm notice the now-unsealed vault straight away.
juju run -a vault "hooks/update-status"
}
# Wait for the model to settle, then snapshot juju status once; all later
# jq queries read this cached JSON instead of re-polling juju.
juju-wait -v
juju_status=$(mktemp)
# Clean up the snapshot on any exit path (the original leaked the temp file).
trap 'rm -f "${juju_status}"' EXIT
juju status --format json > "${juju_status}"
# Check if we're using percona-cluster or mysql-innodb-cluster
percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status}")
mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status}")
if [[ -n "${percona_cluster}" ]] ; then
# Bionic path: percona-cluster (galera). After a full outage every unit is
# down; exactly one unit must be bootstrapped first. Choose it from the
# workload-status messages, which look like the examples below.
mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
#{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
mysql_units=$(echo "${mysql_status}" | jq -r .sub)
bootstrap_unit=""
mysql_lead=$(get_lead mysql)
# Prefer a unit that galera itself flagged "Safe To Bootstrap: 1".
safe_to_bootstrap=$(echo "$mysql_status" | jq -rc . | grep "Safe To Bootstrap: 1" | jq -r .sub)
if [[ -n "$safe_to_bootstrap" ]]
then
bootstrap_unit=$safe_to_bootstrap
else
# Otherwise fall back to galera sequence numbers: the unit holding the
# highest (most recent) sequence number is the one to bootstrap from.
seq_number=$(echo "$mysql_status" | jq -rc . | grep "Sequence Number" )
if [[ -n "${seq_number}" ]]
then
# Extract {sub, seq} pairs from "... Sequence Number: N. ..." messages.
seqs=$(echo "$seq_number" | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")
uniq_seqs=$(echo "$seqs" | jq -r .seq | sort -n | uniq)
seq_count=$(echo "$uniq_seqs" | xargs | wc -w)
highest_seq=$(echo "${seqs}" | jq -r .seq | sort -n | uniq | tail -n 1)
if [[ ${seq_count} -eq 1 ]]
then # same seq numbers all round
if [[ ${highest_seq} -eq -1 ]]
then # if all seq numbers are -1
# -1 means no recoverable position was reported; bail out rather than
# risk bootstrapping from the wrong unit.
echo "The sequence number is -1 ... exiting"
exit 1
fi
# All units agree, so the leader is as good a choice as any.
bootstrap_unit=${mysql_lead}
else # we have different seq numbers
unit_high_seq=$(echo "$seqs" | jq -rc . | grep "${highest_seq}" | jq -r .sub | tail -n 1)
bootstrap_unit=${unit_high_seq}
fi
fi
fi
if [[ -n ${bootstrap_unit} ]]
then
# Bootstrap the chosen unit, then wait for all three units to acknowledge.
juju run-action --wait "${bootstrap_unit}" bootstrap-pxc
juju run --application mysql "hooks/update-status"
until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
do
sleep 10
done
# notify-bootstrapped must run on a unit OTHER than the one that was
# bootstrapped: the first non-leader if the leader bootstrapped, else the
# leader itself.
if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
for unit in ${mysql_units}; do
if [[ "${unit}" != "${mysql_lead}" ]] ; then
juju run-action --wait "${unit}" notify-bootstrapped
break
fi
done
else
juju run-action --wait "${mysql_lead}" notify-bootstrapped
fi
juju run -a mysql "hooks/update-status"
until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
do
sleep 10
done
# This is so that nagios doesn't report that the mysql daemon is down
# although the process is running. juju will then automatically start
# the mysqld process
juju ssh "${bootstrap_unit}" -- sudo reboot
fi
# nova services tend to be wedged after mysql has been away; restart them
# in the background while the rest of the recovery continues.
juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
fi
if [[ -n "${mysql_innodb_cluster}" ]] ; then
# Focal path: mysql-innodb-cluster. Recover from a complete outage unless
# some unit already reports an ONLINE read-write cluster.
mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
if [[ -z "${is_ready}" ]] ; then
# Try the leader first. If it is not the most up to date instance, the
# action yields no outcome and its output names the instance to use.
reboot_status=$(juju run-action mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)
outcome=$(echo "$reboot_status" | jq .[].results.outcome)
if [[ ${outcome} == null ]] ; then
# Parse the suggested instance's IP out of the action output and re-run
# the recovery action on the unit owning that address.
output=$(echo "$reboot_status" | jq .[].results.output)
mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")
bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")
juju run-action "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait
fi
juju run --application mysql-innodb-cluster "hooks/update-status"
# Run update-status on all mysql-router units, to ensure that they have
# connected to the mysql-innodb-cluster
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
| xargs -I{} juju run -a "{}" -- 'hooks/update-status'
fi
fi
# Restart services known to come up wedged after a full outage.
juju run -a heat -- sudo systemctl restart heat-engine &
juju run -a vault -- sudo systemctl restart vault &
wait
# cleanup all crm resources on every hacluster leader (the inner command is
# quoted/escaped so that crm_resource's output is expanded remotely, not here)
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
| xargs -I{} juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
# Unseal vault before touching the ceph-osd units below, since their disks
# are encrypted via vaultlocker.
do_vault
juju run --all -- sudo systemctl restart systemd-resolved
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")
# NOTE(review): ceph_osds appears to be unused below — left as-is.
ceph_osds=""
for apps in ${ceph_osd_apps}
do
# Re-run the vaultlocker decrypt units (kill then start), then bring up the
# ceph-volume units in the background; the final wait is the barrier.
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
done
wait
# Landscape services need a kick after the cloud comes back; push a helper
# script to every landscape-server unit and restart them all in parallel.
lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status}")
# Use an unpredictable local temp name (a fixed /tmp path is clobberable and
# symlink-attackable); the remote copy keeps the well-known name.
restart_script=$(mktemp)
cat > "${restart_script}" << 'EOF'
#!/bin/bash
sudo systemctl restart landscape-*
EOF
for lds_server in ${lds_servers}
do
    juju scp "${restart_script}" "${lds_server}":restart-landscape.sh
    juju ssh "${lds_server}" chmod +x restart-landscape.sh
    juju ssh "${lds_server}" sudo ./restart-landscape.sh &
done
wait
rm -f "${restart_script}"