Update to latest fix_cloud.sh script

parent bf4bfc524c, commit 2a46f13949

fix_cloud.sh (96 changed lines)
@@ -1,6 +1,7 @@
 #!/bin/bash

-set -ax
+# Used for debugging
+# set -ax

 # This script is required after a reboot of the cloud after the cloud has been
 # shut down
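
For reference, `set -a` marks every subsequently assigned variable for export to child processes, and `set -x` traces each command before it runs; the commit keeps the line only as a commented-out debugging aid. A standalone illustration, not part of the commit:

set -ax
FOO=bar               # with -a active, FOO is exported automatically
bash -c 'echo $FOO'   # prints "bar"; with -x, each command is traced first
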
@@ -31,20 +32,39 @@ check_unit_status()
 get_lead()
 {
 app_name=$1
-units=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | .key")
-
-for unit in ${units} ; do
-is_leader=$(juju run --unit ${unit} "is-leader")
-[[ "${is_leader}" == "True" ]] && unit_lead=${unit} && break
-done
-
-echo $unit_lead
+cat ${juju_status} | jq -rc ".applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key"
 }

+do_vault()
+{
+vault_vip=$(juju config vault vip)
+echo export VAULT_ADDR="http://${vault_vip}:8200"
+export VAULT_ADDR="http://${vault_vip}:8200"
+
+echo " "
+
+IPS=$(cat ${juju_status} | jq '.applications.vault.units | to_entries[] | .value."public-address"' | sed s/\"//g)
+
+for ip in $IPS;do
+echo export VAULT_ADDR=http://${ip}:8200;
+export VAULT_ADDR=http://${ip}:8200;
+for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}');do
+echo vault operator unseal -tls-skip-verify $vault_key
+vault operator unseal -tls-skip-verify $vault_key
+done;
+done;
+
+juju run -a vault "hooks/update-status"
+}
+
 juju-wait -v

-mysql_status=$(juju status --format json | jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
+juju_status=$(mktemp)
+
+juju status --format json > ${juju_status}
+
+mysql_status=$(cat ${juju_status} | jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")

 #{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
 #{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
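
A note on the rewritten get_lead(): because the inner quotes around true are not escaped, the shell closes and reopens the double-quoted string, so jq actually receives select(.value.leader == true), which happens to match the boolean leader flag in juju's status JSON. A self-contained sketch against a trimmed, fabricated snapshot:

# Fabricated `juju status --format json` snapshot, for illustration only:
echo '{"applications":{"mysql":{"units":{"mysql/0":{"leader":true},"mysql/1":{}}}}}' > /tmp/status.json

# The filter the new get_lead() builds for app_name=mysql:
jq -rc '.applications.mysql.units | to_entries[] | select(.value.leader == true) | .key' /tmp/status.json
# prints: mysql/0
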
@@ -64,7 +84,6 @@ then

 else

-#seq_number=$(echo $mysql_status | jq -rc . | grep "Sequence Number" | jq .status | sed s/\"//g)
 seq_number=$(echo $mysql_status | jq -rc . | grep "Sequence Number" )

 if [[ -n "${seq_number}" ]]
@@ -75,13 +94,20 @@ else
 uniq_seqs=$(echo $seqs| jq .seq | sed s/\"//g | sort -n | uniq)
 seq_count=$(echo $uniq_seqs | xargs | wc -w)

+highest_seq=$(echo "${seqs}"| jq .seq | sed s/\"//g | sort -n | uniq | tail -n 1)
+lowest_seq=$(echo "${seqs}"| jq .seq | sed s/\"//g | sort -n | uniq | head -n 1)
+
 if [[ ${seq_count} -eq 1 ]]
 then # same seq numbers all round
+if [[ ${highest_seq} -eq -1 ]]
+then # if all seq numbers are -1
+echo "The sequence number is -1 ... exiting"
+exit 1
+fi
 bootstrap_unit=${mysql_lead}
 else # we have different seq numbers

-highest_seq=$(echo $seqs| jq .seq | sed s/\"//g | sort -n | uniq | head -n 1)
-unit_high_seq=$(echo seqs | jq -rc . | grep ${highest_seq} | jq .sub | sed s/\"//g)
+unit_high_seq=$(echo $seqs | jq -rc . | grep ${highest_seq} | jq .sub | sed s/\"//g | tail -n 1)

 bootstrap_unit=${unit_high_seq}
 fi
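
To see what the two hunks above compute, here is the sequence-number selection run against two fabricated status objects (values invented for illustration):

seqs='{"sub":"mysql/0","seq":"102921"}
{"sub":"mysql/1","seq":"102918"}'

echo "${seqs}" | jq .seq | sed s/\"//g | sort -n | uniq | tail -n 1   # highest: 102921
echo "${seqs}" | jq .seq | sed s/\"//g | sort -n | uniq | head -n 1   # lowest:  102918

# The reworked unit_high_seq line; tail -n 1 keeps a single unit even if
# grep matches more than one line:
echo "${seqs}" | jq -rc . | grep 102921 | jq .sub | sed s/\"//g | tail -n 1   # mysql/0
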
@@ -116,43 +142,51 @@ then
 # This is so that nagios doesn't report that the mysql daemon is down
 # although the process is running. juju will then automatically start
 # the mysqld process
-juju run --timeout 30s --unit ${bootstrap_unit} -- sudo reboot
+juju ssh ${bootstrap_unit} -- sudo reboot
 fi

-juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth
-juju run -a heat -- sudo systemctl restart heat-engine
-juju run -a vault -- sudo systemctl restart vault
+juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
+juju run -a heat -- sudo systemctl restart heat-engine &
+juju run -a vault -- sudo systemctl restart vault &

-juju run -a nova-cloud-controller "hooks/update-status"
-juju run -a heat "hooks/update-status"
+wait
+
+for app in nova-cloud-controller heat vault ; do
+juju run -a $app "hooks/update-status" &
+done
+
+wait

 # cleanup all crm resources
-juju status --format json | jq ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" | sed s/\"//g | xargs -i juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
+cat ${juju_status} | jq ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" | sed s/\"//g | xargs -i juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'

 cd ~/stsstack-bundles/openstack/arif/
 ./vault.sh

-juju run -a vault "hooks/update-status"
+do_vault

 # Wait 10 seconds, and ensure that vault is unsealed
 echo "Sleeping 10 seconds to wait for vault to finalise unseal"
 sleep 10

-ceph_osds=$(juju status ceph-osd --format json | jq -rc ". | .applications[\"ceph-osd\"].units | to_entries[] | .key")
+ceph_osd_apps=$(cat ${juju_status} | jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key")
+
+ceph_osds=""
+for apps in ${ceph_osd_apps}
+do
+ceph_osds="${ceph_osds} $(cat ${juju_status} | jq -rc ". | .applications[\"${apps}\"].units | to_entries[] | .key")"
+done
+

 for ceph_osd in ${ceph_osds}
 do
-osds=$(juju ssh ${ceph_osd} -- sudo ceph-volume lvm list --format json | jq -rc ". | to_entries[] | {id:.key,key:.value[].tags[\"ceph.osd_fsid\"]}")
-for osd in ${osds}; do
-osd_id=$(echo $osd | jq .id | sed s/\"//g)
-uuid=$(echo $osd | jq .key | sed s/\"//g)
-juju ssh ${ceph_osd} -- sudo systemctl restart ceph-volume@lvm-${osd_id}-${uuid}
-done
+juju ssh ${ceph_osd} -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@* ;
+sudo systemctl start --all --type=service vaultlocker-decrypt@* ;
+sudo systemctl start --all --type=service ceph-volume@*' &
 done

+wait
+
 juju run -a ceph-osd "hooks/update-status"

-lds_servers=$(juju status landscape-server --format json | jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key")
+lds_servers=$(cat ${juju_status} | jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key")

 cat > /tmp/restart-landscape.sh << EOF
 #!/bin/bash
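
The thread running through this commit is taking one snapshot of `juju status --format json` in a temp file and pointing every later jq query at it, rather than re-polling the controller each time. A minimal sketch of the pattern (the jq query is illustrative):

juju_status=$(mktemp)
juju status --format json > ${juju_status}

# Every later lookup reads the snapshot instead of calling juju again:
cat ${juju_status} | jq -rc '.applications | keys[]'

# Note the script itself never removes the temp file; mktemp leaves it in /tmp.
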
vault-secrets.txt (new file, 18 lines)
@@ -0,0 +1,18 @@
+Unseal Key 1: Qqqu4T3C4iYGqDVLoWkKqVv7doCyj1q9y6zmMK4kwco1
+Unseal Key 2: W3TKRBJ0QDt+wNJ0xvM9MTcwJdoJhZBCSb4ua2UJF7DV
+Unseal Key 3: ZLqFmQXgeTiGoUJesMnhaCt9EK9IZ1neXZlfw8EuAYcb
+Unseal Key 4: Obsm1N8/mGteI72iQ2YzQxy5iqTts9LKPY04gp2IVc0o
+Unseal Key 5: wh8mao0F0C/WAsL6F0BbLg4MWJP5LOgyDmdqGlHMBcmF
+
+Initial Root Token: s.V4hmq31Dng29yFghr3LqnElC
+
+Vault initialized with 5 key shares and a key threshold of 3. Please securely
+distribute the key shares printed above. When the Vault is re-sealed,
+restarted, or stopped, you must supply at least 3 of these keys to unseal it
+before it can start servicing requests.
+
+Vault does not store the generated master key. Without at least 3 key to
+reconstruct the master key, Vault will remain permanently sealed!
+
+It is possible to generate new unseal keys, provided you have a quorum of
+existing unseal keys shares. See "vault operator rekey" for more information.
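
do_vault() consumes this file with `head -n3 vault-secrets.txt | awk '{print $4}'`: each of the first three lines has the shape "Unseal Key N: <key>", so field 4 is the key itself, and three keys meet the threshold of 3. Run against the file above:

head -n3 vault-secrets.txt | awk '{print $4}'
# Qqqu4T3C4iYGqDVLoWkKqVv7doCyj1q9y6zmMK4kwco1
# W3TKRBJ0QDt+wNJ0xvM9MTcwJdoJhZBCSb4ua2UJF7DV
# ZLqFmQXgeTiGoUJesMnhaCt9EK9IZ1neXZlfw8EuAYcb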