Update fix_cloud for prep for juju 3

Arif Ali 2022-10-24 11:37:22 +01:00
parent e943d70891
commit 5b1bb2a1f1
Signed by: arif
GPG Key ID: 369608FBA1353A70

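Background for the diff below, not part of the commit itself: Juju 3.x renamed "juju run" to "juju exec" and "juju run-action" to "juju run", so the new check_juju_version helper detects the client's major version and stores the matching command names in the juju_run and juju_run_action variables. A minimal sketch of the version detection it relies on, assuming only that the juju CLI is on the PATH (the sample output strings are illustrative):

# "juju version" prints something like "2.9.44-ubuntu-amd64" or "3.1.5-genericlinux-amd64";
# dropping the build suffix and everything after the first dot leaves the major version.
major=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)
echo "Detected Juju major version: ${major}"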

@@ -6,6 +6,18 @@
# This script is required after a reboot of the cloud after the cloud has been
# shut down
+check_juju_version()
+{
+juju_version=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)
+juju_run="juju run"
+juju_run_action="juju run-action"
+if [[ ${juju_version} -ge 3 ]] ; then
+juju_run="juju exec"
+juju_run_action="juju run"
+fi
+}
check_unit_status()
{
@@ -55,9 +67,11 @@ do_vault()
done;
done;
-#juju run -a vault "hooks/update-status"
+#${juju_run} -a vault "hooks/update-status"
}
+check_juju_version
juju-wait -v
juju_status=$(mktemp)
@@ -121,8 +135,8 @@ if [[ -n "${percona_cluster}" ]] ; then
if [[ -n ${bootstrap_unit} ]]
then
-juju run-action --wait "${bootstrap_unit}" bootstrap-pxc
-juju run --application mysql "hooks/update-status"
+${juju_run_action} --wait "${bootstrap_unit}" bootstrap-pxc
+${juju_run} --application mysql "hooks/update-status"
until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
do
sleep 10
@@ -130,14 +144,14 @@ if [[ -n "${percona_cluster}" ]] ; then
if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
for unit in ${mysql_units}; do
if [[ "${unit}" != "${mysql_lead}" ]] ; then
-juju run-action --wait "${unit}" notify-bootstrapped
+${juju_run_action} --wait "${unit}" notify-bootstrapped
break
fi
done
else
-juju run-action --wait "${mysql_lead}" notify-bootstrapped
+${juju_run_action} --wait "${mysql_lead}" notify-bootstrapped
fi
-juju run -a mysql "hooks/update-status"
+${juju_run} -a mysql "hooks/update-status"
until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
do
sleep 10
@@ -148,7 +162,7 @@ if [[ -n "${percona_cluster}" ]] ; then
juju ssh "${bootstrap_unit}" -- sudo reboot
fi
-juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
+juju exec -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
fi
if [[ -n "${mysql_innodb_cluster}" ]] ; then
@@ -158,7 +172,7 @@ if [[ -n "${mysql_innodb_cluster}" ]] ; then
is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
if [[ -z "${is_ready}" ]] ; then
-reboot_status=$(juju run-action mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)
+reboot_status=$(${juju_run_action} mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)
outcome=$(echo "$reboot_status" | jq .[].results.outcome)
@@ -170,43 +184,43 @@ if [[ -n "${mysql_innodb_cluster}" ]] ; then
bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")
-juju run-action "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait
+${juju_run_action} "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait
fi
-#juju run --application mysql-innodb-cluster "hooks/update-status"
+#${juju_run} --application mysql-innodb-cluster "hooks/update-status"
# Run update-status on all mysql-router units, to ensure that they've connected to the mysql-innodb-cluster
#jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
-# | xargs -I{} juju run -a "{}" -- 'hooks/update-status'
+# | xargs -I{} ${juju_run} -a "{}" -- 'hooks/update-status'
fi
fi
-juju run -u elasticsearch/leader -- sudo systemctl restart elasticsearch &
-juju run -a heat -- sudo systemctl restart heat-engine &
-juju run -a vault -- sudo systemctl restart vault &
+${juju_run} -u elasticsearch/leader -- sudo systemctl restart elasticsearch &
+${juju_run} -a heat -- sudo systemctl restart heat-engine &
+${juju_run} -a vault -- sudo systemctl restart vault &
wait
# cleanup all crm resources
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
-| xargs -I{} juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
+| xargs -I{} ${juju_run} --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
do_vault
# remove DNS entry for external network
-juju run --all -- sudo sed -i -e s/192.168.1.13,//g -e s/192.168.1.9,//g /etc/netplan/99-juju.yaml
-juju run --all -- "sudo netplan apply ; sudo systemctl restart systemd-resolved"
+${juju_run} --all -- sudo sed -i -e s/192.168.1.13,//g -e s/192.168.1.9,//g /etc/netplan/99-juju.yaml
+${juju_run} --all -- "sudo netplan apply ; sudo systemctl restart systemd-resolved"
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")
for apps in ${ceph_osd_apps}
do
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
done
wait
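
As a concrete illustration of what the rewritten calls expand to at runtime (a sketch assuming a deployed vault application, as in the restart line above):

# On a Juju 2.9 client, juju_run="juju run", so the script effectively executes:
juju run -a vault -- sudo systemctl restart vault
# On a Juju 3.x client, juju_run="juju exec", so the same line becomes:
juju exec -a vault -- sudo systemctl restart vault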