useful_scripts/fix_cloud.sh
#!/bin/bash
# Used for debugging
# set -ax
# This script is required to recover the cloud after it has been shut down
# and rebooted.
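#
# Requires juju-wait, jq and the vault CLI, and expects vault-secrets.txt
# (containing the vault unseal keys) in the current working directory.

# Detect the juju major version: juju 3.x renamed "juju run" to "juju exec"
# and "juju run-action" to "juju run", so pick the right commands up front.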
check_juju_version()
{
    juju_version=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)
    juju_run="juju run"
    juju_run_action="juju run-action"
    if [[ ${juju_version} -ge 3 ]] ; then
        juju_run="juju exec"
        juju_run_action="juju run"
    fi
}
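
# check_unit_status <application> <workload-status message>
# Echoes 1 once at least three units of <application> report the given
# workload-status message (assumes a three-unit cluster), otherwise 0.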
check_unit_status()
{
    app_name=$1
    status_check="$2"
    unit_status=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
    app_units=$(echo "${unit_status}" | jq -r .sub)
    num=0
    for unit in ${app_units} ; do
        this_unit_status=$(echo "$unit_status" | jq -rc . | grep "${unit}" | jq -r .status)
        if [[ "${this_unit_status}" == "${status_check}" ]] ; then
            (( num++ ))
        fi
    done
    if [[ $num -ge 3 ]] ; then echo 1
    else echo 0
    fi
}
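
# get_lead <application>
# Echoes the leader unit of <application> from the cached juju status.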
get_lead()
{
    app_name=$1
    jq -rc ".applications.${app_name}.units | to_entries[] | select(.value.leader == true) | .key" "${juju_status}"
}
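
# do_vault
# Unseals every vault unit, using the first three unseal keys found in
# vault-secrets.txt.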
do_vault()
{
    vault_vip=$(juju config vault vip)
    echo export VAULT_ADDR="http://${vault_vip}:8200"
    export VAULT_ADDR="http://${vault_vip}:8200"
    echo " "
    IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status}")
    for ip in $IPS ; do
        echo export VAULT_ADDR=http://"${ip}":8200
        export VAULT_ADDR=http://"${ip}":8200
        for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}') ; do
            echo vault operator unseal -tls-skip-verify "$vault_key"
            vault operator unseal -tls-skip-verify "$vault_key"
        done
    done
    #${juju_run} -a vault "hooks/update-status"
}
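
# Main flow: wait for the model to settle, then cache juju status as JSON
# so the rest of the script can query it with jq.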
check_juju_version
juju-wait -v
juju_status=$(mktemp)
juju status --format json > "${juju_status}"
# Check if we're using percona-cluster or mysql-innodb-cluster
percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status}")
mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status}")
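
# percona-cluster recovery: pick the unit that is safe to bootstrap (either
# flagged "Safe To Bootstrap: 1" or holding the highest sequence number),
# run bootstrap-pxc on it, then tell the rest of the cluster via
# notify-bootstrapped.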
if [[ -n "${percona_cluster}" ]] ; then
    mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
    #{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
    #{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
    #{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
    mysql_units=$(echo "${mysql_status}" | jq -r .sub)
    bootstrap_unit=""
    mysql_lead=$(get_lead mysql)
    safe_to_bootstrap=$(echo "$mysql_status" | jq -rc . | grep "Safe To Bootstrap: 1" | jq -r .sub)
    if [[ -n "$safe_to_bootstrap" ]]
    then
        bootstrap_unit=$safe_to_bootstrap
    else
        seq_number=$(echo "$mysql_status" | jq -rc . | grep "Sequence Number")
        if [[ -n "${seq_number}" ]]
        then
            seqs=$(echo "$seq_number" | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")
            uniq_seqs=$(echo "$seqs" | jq -r .seq | sort -n | uniq)
            seq_count=$(echo "$uniq_seqs" | xargs | wc -w)
            highest_seq=$(echo "${seqs}" | jq -r .seq | sort -n | uniq | tail -n 1)
            if [[ ${seq_count} -eq 1 ]]
            then # same seq numbers all round
                if [[ ${highest_seq} -eq -1 ]]
                then # if all seq numbers are -1
                    echo "The sequence number is -1 ... exiting"
                    exit 1
                fi
                bootstrap_unit=${mysql_lead}
            else # we have different seq numbers
                unit_high_seq=$(echo "$seqs" | jq -rc . | grep "${highest_seq}" | jq -r .sub | tail -n 1)
                bootstrap_unit=${unit_high_seq}
            fi
        fi
    fi
    if [[ -n ${bootstrap_unit} ]]
    then
        ${juju_run_action} --wait "${bootstrap_unit}" bootstrap-pxc
        ${juju_run} -a mysql "hooks/update-status"
        until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
        do
            sleep 10
        done
        if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
            for unit in ${mysql_units}; do
                if [[ "${unit}" != "${mysql_lead}" ]] ; then
                    ${juju_run_action} --wait "${unit}" notify-bootstrapped
                    break
                fi
            done
        else
            ${juju_run_action} --wait "${mysql_lead}" notify-bootstrapped
        fi
        ${juju_run} -a mysql "hooks/update-status"
        until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
        do
            sleep 10
        done
        # This is so that nagios doesn't report that the mysql daemon is down
        # although the process is running. juju will then automatically start
        # the mysqld process
        juju ssh "${bootstrap_unit}" -- sudo reboot
    fi
    ${juju_run} -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
fi
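
# mysql-innodb-cluster recovery: if the cluster is not reporting ONLINE,
# reboot it from complete outage on the leader; if the leader is not the
# most up-to-date instance, retry on the unit the action output points at.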
if [[ -n "${mysql_innodb_cluster}" ]] ; then
    mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
    is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
    if [[ -z "${is_ready}" ]] ; then
        reboot_status=$(${juju_run_action} mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)
        outcome=$(echo "$reboot_status" | jq .[].results.outcome)
        if [[ ${outcome} == null ]] ; then
            output=$(echo "$reboot_status" | jq .[].results.output)
            mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1 | grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")
            bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")
            ${juju_run_action} "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait
        fi
        #${juju_run} -a mysql-innodb-cluster "hooks/update-status"
        # Run update-status on all mysql-router units, to ensure that they have
        # connected to the mysql-innodb-cluster
        #jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
        #    | xargs -I{} ${juju_run} -a "{}" -- 'hooks/update-status'
    fi
fi
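
# Kick services that typically need a manual restart after a full outage.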
${juju_run} -u elasticsearch/leader -- sudo systemctl restart elasticsearch &
${juju_run} -a heat -- sudo systemctl restart heat-engine &
${juju_run} -a vault -- sudo systemctl restart vault &
wait
# cleanup all crm resources
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
    | xargs -I{} ${juju_run} -u "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
do_vault
# remove DNS entry for external network
${juju_run} --all -- "sudo sed -i -e s/192.168.1.13,//g -e s/192.168.1.9,//g /etc/netplan/99-juju.yaml"
${juju_run} --all -- "sudo netplan apply ; sudo systemctl restart systemd-resolved"
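
# Bring the ceph OSDs back: re-run vaultlocker-decrypt (the OSD devices are
# vaultlocker-encrypted and can only be unlocked once vault is unsealed),
# then start the ceph-volume units.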
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")
for apps in ${ceph_osd_apps}
do
    ${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
    ${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
    ${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
done
wait
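
# Restart all landscape-server services via a small helper script copied to
# each unit.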
lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status}")
cat > /tmp/restart-landscape.sh << EOF
#!/bin/bash
sudo systemctl restart landscape-*
EOF
for lds_server in ${lds_servers}
do
    juju scp /tmp/restart-landscape.sh "${lds_server}":.
    juju ssh "${lds_server}" chmod +x restart-landscape.sh
    juju ssh "${lds_server}" sudo ./restart-landscape.sh &
done
wait