[fix_cloud] Add juju parametrisation, and direct lma to model

Arif Ali 2023-09-25 10:44:42 +01:00
parent 85a5cadd47
commit c43b07d385
Signed by: arif
GPG Key ID: 369608FBA1353A70


@@ -6,6 +6,8 @@
# This script is required after a reboot of the cloud following a full
# shutdown
model=" -m cpe-focal"
LMA_SERVERS="off"
check_juju_version()
@@ -16,11 +18,24 @@ check_juju_version()
juju_run="juju run --timeout ${juju_timeout}"
juju_run_action="juju run-action --wait"
juju_status="juju status"
juju_ssh="juju ssh"
juju_scp="juju scp"
juju_config="juju config"
if [[ ${juju_version} -ge 3 ]] ; then
juju_run="juju exec --wait=${juju_timeout}"
juju_run_action="juju run"
fi
if [[ -n ${model} ]] ; then
juju_run+=${model}
juju_run_action+=${model}
juju_status+=${model}
juju_ssh+=${model}
juju_scp+=${model}
juju_config+=${model}
fi
}
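As an illustration (the juju_timeout value below is assumed; it is set earlier in the script), with Juju 3.x and the model suffix above, the parametrised commands expand to roughly:

    juju_run="juju exec --wait=30m -m cpe-focal"
    juju_run_action="juju run -m cpe-focal"
    juju_status="juju status -m cpe-focal"
    juju_ssh="juju ssh -m cpe-focal"
    juju_scp="juju scp -m cpe-focal"
    juju_config="juju config -m cpe-focal"

The leading space baked into model=" -m cpe-focal" is what keeps the appended flag separated from the base command.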
check_unit_status()
@@ -29,7 +44,7 @@ check_unit_status()
app_name=$1
status_check="$2"
unit_status=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
unit_status=$(${juju_status} --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
app_units=$(echo "${unit_status}" | jq -r .sub)
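For reference, the unit_status filter above emits one compact JSON object per unit, e.g. (hypothetical values):

    {"sub":"nova-compute/0","status":"Unit is ready"}
    {"sub":"nova-compute/1","status":"Unit is ready"}

app_units then reduces this to the bare unit names.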
@@ -50,18 +65,18 @@ get_lead()
{
app_name=$1
jq -rc '.applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key' "${juju_status}"
jq -rc '.applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key' "${juju_status_out}"
}
do_vault()
{
vault_vip=$(juju config vault vip)
vault_vip=$(${juju_config} vault vip)
echo export VAULT_ADDR="http://${vault_vip}:8200"
export VAULT_ADDR="http://${vault_vip}:8200"
echo " "
IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status}")
IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status_out}")
for ip in $IPS;do
echo export VAULT_ADDR=http://"${ip}":8200;
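The exports echoed here are intended to be pasted by the operator; a typical follow-up, sketched under the assumption that the vault units still need unsealing (the unseal step itself is not part of this hunk), is:

    export VAULT_ADDR=http://<vault-unit-ip>:8200   # one of the addresses printed above
    vault operator unseal                           # prompts for an unseal key share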
@@ -77,19 +92,19 @@ do_vault()
check_juju_version
juju-wait -v
juju-wait -v ${model}
juju_status=$(mktemp)
juju_status_out=$(mktemp)
juju status --format json > "${juju_status}"
${juju_status} --format json > "${juju_status_out}"
# Check if we're using percona-cluster or mysql-innodb-cluster
percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status}")
mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status}")
percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status_out}")
mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status_out}")
if [[ -n "${percona_cluster}" ]] ; then
mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")
#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
@@ -164,7 +179,7 @@ if [[ -n "${percona_cluster}" ]] ; then
# This is so that nagios doesn't report that the mysql daemon is down
# although the process is running. juju will then automatically start
# the mysqld process
juju ssh "${bootstrap_unit}" -- sudo reboot
${juju_ssh} "${bootstrap_unit}" -- sudo reboot
fi
${juju_run} -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
@@ -172,7 +187,7 @@ fi
if [[ -n "${mysql_innodb_cluster}" ]] ; then
mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")
is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
@@ -187,7 +202,7 @@ if [[ -n "${mysql_innodb_cluster}" ]] ; then
mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")
bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")
bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status_out}")
${juju_run_action} "${bootstrap_unit}" reboot-cluster-from-complete-outage
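With the parametrisation above and a hypothetical leader unit mysql-innodb-cluster/0, this expands to roughly:

    # Juju 2.9
    juju run-action --wait -m cpe-focal mysql-innodb-cluster/0 reboot-cluster-from-complete-outage
    # Juju 3.x
    juju run -m cpe-focal mysql-innodb-cluster/0 reboot-cluster-from-complete-outage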
@@ -196,7 +211,7 @@ if [[ -n "${mysql_innodb_cluster}" ]] ; then
#${juju_run} -a mysql-innodb-cluster "hooks/update-status"
# Run update-status on all mysql-router units, to ensure that they have connected to the mysql-innodb-cluster
#jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
#jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status_out}" \
# | xargs -I{} ${juju_run} -a "{}" -- 'hooks/update-status'
fi
@@ -210,7 +225,7 @@ ${juju_run} -a vault -- sudo systemctl restart vault &
wait
# cleanup all crm resources
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status_out}" \
| xargs -I{} ${juju_run} -u "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
do_vault
@@ -219,7 +234,7 @@ do_vault
${juju_run} --all -- "sudo sed -i -e s/192.168.1.13,//g -e s/192.168.1.9,//g /etc/netplan/99-juju.yaml"
${juju_run} --all -- "sudo netplan apply ; sudo systemctl restart systemd-resolved"
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status_out}")
for apps in ${ceph_osd_apps}
do
@@ -230,7 +245,7 @@ done
wait
lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status}")
lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status_out}")
cat > /tmp/restart-landscape.sh << EOF
#!/bin/bash
@@ -240,11 +255,11 @@ EOF
for lds_server in ${lds_servers}
do
juju scp /tmp/restart-landscape.sh "${lds_server}":.
juju ssh "${lds_server}" chmod +x restart-landscape.sh
juju ssh "${lds_server}" sudo ./restart-landscape.sh &
${juju_scp} /tmp/restart-landscape.sh "${lds_server}":.
${juju_ssh} "${lds_server}" chmod +x restart-landscape.sh
${juju_ssh} "${lds_server}" sudo ./restart-landscape.sh &
done
wait
[[ $LMA_SERVERS == "off" ]] && ${juju_run} -a lma-server -- sudo halt -p
[[ $LMA_SERVERS == "off" ]] && ${juju_run} -m lma --all -- sudo halt -p
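For clarity, with the Juju 2.x prefix and an assumed timeout, this last line expands to roughly:

    juju run --timeout 30m -m cpe-focal -m lma --all -- sudo halt -p

Note that the juju_run prefix already carries the cpe-focal model flag, so -m appears twice; the intent of the trailing -m lma, per the commit title, is to direct the shutdown at the separate lma model.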