Updates from local machine
* Add functions.sh for common tasks * Update various scripts to use functions.sh * Update mongo status to disable all ssh key checks * Add a few extra scripts
This commit is contained in:
parent
bb166ecc96
commit
d84ce463b1
@ -3,9 +3,9 @@
|
||||
check_controller()
|
||||
{
|
||||
controller=$1
|
||||
model="cpe-focal"
|
||||
model="cpe-jammy"
|
||||
|
||||
juju status --debug -m "${controller}":${model} --color | grep ^Unit -A 999999 | grep -E -v "started.*focal|started.*bionic|active.*idle"
|
||||
juju status -m "${controller}":${model} --color | grep "Unit " -A 999999 | grep -E -v "started.*ubuntu@|active.*idle"
|
||||
|
||||
}
|
||||
|
||||
|
@ -22,4 +22,4 @@ EOF
|
||||
|
||||
ssh_key=$HOME/.local/share/juju/ssh/juju_id_rsa
|
||||
|
||||
ssh -l ubuntu -i ${ssh_key} ${host} "${cmds}"
|
||||
ssh -o IdentityAgent=none -l ubuntu -i ${ssh_key} ${host} "${cmds}"
|
||||
|
@ -1,6 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
juju run --timeout=30s -a ceph-osd -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
|
||||
juju run --timeout=30s -a ceph-osd -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
|
||||
juju run --timeout=30s -a ceph-osd -- 'sudo systemctl start --all --type=service ceph-volume@*'
|
||||
. functions.sh
|
||||
check_juju_version
|
||||
|
||||
get_juju_status
|
||||
|
||||
do_ceph
|
||||
|
21
dump_memcache.sh
Executable file
21
dump_memcache.sh
Executable file
@ -0,0 +1,21 @@
|
||||
#!/bin/bash
# Dump memcached diagnostics (display/stats/settings/dump) from every
# memcached unit in the current juju model into one text file per unit.
# Output files are written to ${prefix}/<unit-with-slashes-replaced>.txt.

memcached_units="$(juju status --format json | jq -rc '.applications.memcached.units | keys[]')"
#prefix="/tmp"
prefix="."

cmd="/usr/share/memcached/scripts/memcached-tool"
host="127.0.0.1:11211"
mm_cmds="display stats settings dump"

for u in ${memcached_units}; do
    # Unit names contain '/', which is unusable in a filename; no need to
    # round-trip through echo for a plain parameter expansion.
    f=${u//\//_}
    echo "Working on unit ${u}"
    location="${prefix}/${f}.txt"
    # The target is a regular file: -f suffices, -r was overkill/dangerous.
    rm -f -- "${location}"
    for mm_cmd in ${mm_cmds}; do
        echo "${mm_cmd}:" >> "${location}"
        juju ssh "${u}" "${cmd} ${host} ${mm_cmd}" >> "${location}"
    done
done
exit 0
|
218
fix_cloud.sh
218
fix_cloud.sh
@ -6,221 +6,28 @@
|
||||
# This script is required after a reboot of the cloud after the cloud has been
|
||||
# shut down
|
||||
|
||||
model=" -m cpe-focal"
|
||||
|
||||
LMA_SERVERS="off"
|
||||
|
||||
check_juju_version()
|
||||
{
|
||||
juju_version=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)
|
||||
|
||||
juju_timeout="30s"
|
||||
|
||||
juju_run="juju run --timeout ${juju_timeout}"
|
||||
juju_run_action="juju run-action --wait"
|
||||
juju_status="juju status"
|
||||
juju_ssh="juju ssh"
|
||||
juju_scp="juju scp"
|
||||
juju_config="juju config"
|
||||
|
||||
if [[ ${juju_version} -ge 3 ]] ; then
|
||||
juju_run="juju exec --wait=${juju_timeout}"
|
||||
juju_run_action="juju run"
|
||||
fi
|
||||
|
||||
if [[ -n ${model} ]] ; then
|
||||
juju_run+=${model}
|
||||
juju_run_action+=${model}
|
||||
juju_status+=${model}
|
||||
juju_ssh+=${model}
|
||||
juju_scp+=${model}
|
||||
juju_config+=${model}
|
||||
fi
|
||||
}
|
||||
|
||||
check_unit_status()
|
||||
{
|
||||
|
||||
app_name=$1
|
||||
status_check="$2"
|
||||
|
||||
unit_status=$(${juju_status} --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
|
||||
|
||||
app_units=$(echo "${unit_status}" | jq -r .sub)
|
||||
|
||||
num=0
|
||||
for unit in ${app_units} ; do
|
||||
this_unit_status=$(echo "$unit_status" | jq -rc . | grep "${unit}" | jq -r .status)
|
||||
if [[ "${this_unit_status}" == "${status_check}" ]] ; then
|
||||
(( num++ ))
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $num -ge 3 ]] ; then echo 1
|
||||
else echo 0
|
||||
fi
|
||||
}
|
||||
|
||||
get_lead()
|
||||
{
|
||||
app_name=$1
|
||||
|
||||
jq -rc '.applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key' "${juju_status_out}"
|
||||
}
|
||||
|
||||
do_vault()
|
||||
{
|
||||
vault_vip=$(${juju_config} vault vip)
|
||||
echo export VAULT_ADDR="http://${vault_vip}:8200"
|
||||
export VAULT_ADDR="http://${vault_vip}:8200"
|
||||
|
||||
echo " "
|
||||
|
||||
IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status_out}")
|
||||
|
||||
for ip in $IPS;do
|
||||
echo export VAULT_ADDR=http://"${ip}":8200;
|
||||
export VAULT_ADDR=http://"${ip}":8200;
|
||||
for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}');do
|
||||
echo vault operator unseal -tls-skip-verify "$vault_key"
|
||||
vault operator unseal -tls-skip-verify "$vault_key"
|
||||
done;
|
||||
done;
|
||||
|
||||
#${juju_run} -a vault "hooks/update-status"
|
||||
}
|
||||
|
||||
. functions.sh
|
||||
check_juju_version
|
||||
|
||||
juju-wait -v ${model}
|
||||
|
||||
juju_status_out=$(mktemp)
|
||||
get_juju_status
|
||||
|
||||
${juju_status} --format json > "${juju_status_out}"
|
||||
|
||||
# Check if we're using percona-cluster or mysql-innodb-cluster
|
||||
# Check if we're using percona-cluster and/or mysql-innodb-cluster
|
||||
percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status_out}")
|
||||
mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status_out}")
|
||||
|
||||
if [[ -n "${percona_cluster}" ]] ; then
|
||||
|
||||
mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")
|
||||
|
||||
#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
|
||||
#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
|
||||
#{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
|
||||
|
||||
mysql_units=$(echo "${mysql_status}" | jq -r .sub)
|
||||
bootstrap_unit=""
|
||||
|
||||
mysql_lead=$(get_lead mysql)
|
||||
|
||||
safe_to_bootstrap=$(echo "$mysql_status" | jq -rc . | grep "Safe To Bootstrap: 1" | jq -r .sub)
|
||||
|
||||
if [[ -n "$safe_to_bootstrap" ]]
|
||||
then
|
||||
|
||||
bootstrap_unit=$safe_to_bootstrap
|
||||
|
||||
else
|
||||
|
||||
seq_number=$(echo "$mysql_status" | jq -rc . | grep "Sequence Number" )
|
||||
|
||||
if [[ -n "${seq_number}" ]]
|
||||
then
|
||||
|
||||
seqs=$(echo "$seq_number" | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")
|
||||
|
||||
uniq_seqs=$(echo "$seqs" | jq -r .seq | sort -n | uniq)
|
||||
seq_count=$(echo "$uniq_seqs" | xargs | wc -w)
|
||||
|
||||
highest_seq=$(echo "${seqs}" | jq -r .seq | sort -n | uniq | tail -n 1)
|
||||
|
||||
if [[ ${seq_count} -eq 1 ]]
|
||||
then # same seq numbers all round
|
||||
if [[ ${highest_seq} -eq -1 ]]
|
||||
then # if all seq numbers are -1
|
||||
echo "The sequence number is -1 ... exiting"
|
||||
exit 1
|
||||
fi
|
||||
bootstrap_unit=${mysql_lead}
|
||||
else # we have different seq numbers
|
||||
|
||||
unit_high_seq=$(echo "$seqs" | jq -rc . | grep "${highest_seq}" | jq -r .sub | tail -n 1)
|
||||
|
||||
bootstrap_unit=${unit_high_seq}
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n ${bootstrap_unit} ]]
|
||||
then
|
||||
${juju_run_action} "${bootstrap_unit}" bootstrap-pxc
|
||||
${juju_run} -a mysql "hooks/update-status"
|
||||
until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
|
||||
do
|
||||
sleep 10
|
||||
done
|
||||
if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
|
||||
for unit in ${mysql_units}; do
|
||||
if [[ "${unit}" != "${mysql_lead}" ]] ; then
|
||||
${juju_run_action} "${unit}" notify-bootstrapped
|
||||
break
|
||||
fi
|
||||
done
|
||||
else
|
||||
${juju_run_action} "${mysql_lead}" notify-bootstrapped
|
||||
fi
|
||||
${juju_run} -a mysql "hooks/update-status"
|
||||
until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
|
||||
do
|
||||
sleep 10
|
||||
done
|
||||
# This is so that nagios doesn't report that the mysql daemon is down
|
||||
# although the process is running. juju will then automatically start
|
||||
# the mysqld process
|
||||
${juju_ssh} "${bootstrap_unit}" -- sudo reboot
|
||||
fi
|
||||
|
||||
${juju_run} -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
|
||||
do_percona_cluster
|
||||
fi
|
||||
|
||||
if [[ -n "${mysql_innodb_cluster}" ]] ; then
|
||||
|
||||
mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")
|
||||
|
||||
is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
|
||||
|
||||
if [[ -z "${is_ready}" ]] ; then
|
||||
reboot_status=$(${juju_run_action} mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --format json)
|
||||
|
||||
outcome=$(echo "$reboot_status" | jq .[].results.outcome)
|
||||
|
||||
if [[ ${outcome} == null ]] ; then
|
||||
|
||||
output=$(echo "$reboot_status" | jq .[].results.output)
|
||||
|
||||
mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")
|
||||
|
||||
bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status_out}")
|
||||
|
||||
${juju_run_action} "${bootstrap_unit}" reboot-cluster-from-complete-outage
|
||||
|
||||
fi
|
||||
|
||||
#${juju_run} -a mysql-innodb-cluster "hooks/update-status"
|
||||
|
||||
# Run update-status on all mysql-router units, to ensure that they have connected to the mysql-innodb-cluster
|
||||
#jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status_out}" \
|
||||
# | xargs -I{} ${juju_run} -a "{}" -- 'hooks/update-status'
|
||||
|
||||
fi
|
||||
|
||||
do_mysql_innodb_cluster
|
||||
fi
|
||||
|
||||
${juju_run} -u elasticsearch/leader -- sudo systemctl restart elasticsearch &
|
||||
${juju_run} -a heat -- sudo systemctl restart heat-engine &
|
||||
${juju_run} -a vault -- sudo systemctl restart vault &
|
||||
${juju_run} -a ceph-radosgw -- 'sudo systemctl restart ceph-radosgw@*' &
|
||||
|
||||
wait
|
||||
|
||||
@ -228,22 +35,13 @@ wait
|
||||
jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status_out}" \
|
||||
| xargs -I{} ${juju_run} -u "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
|
||||
|
||||
do_vault
|
||||
do_vault restart
|
||||
|
||||
# remove DNS entry for external network
|
||||
${juju_run} --all -- "sudo sed -i -e s/192.168.1.13,//g -e s/192.168.1.9,//g /etc/netplan/99-juju.yaml"
|
||||
${juju_run} --all -- "sudo netplan apply ; sudo systemctl restart systemd-resolved"
|
||||
|
||||
ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status_out}")
|
||||
|
||||
for apps in ${ceph_osd_apps}
|
||||
do
|
||||
${juju_run} -a "${apps}" -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
|
||||
${juju_run} -a "${apps}" -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
|
||||
${juju_run} -a "${apps}" -- 'sudo systemctl start --all --type=service ceph-volume@*' &
|
||||
done
|
||||
|
||||
wait
|
||||
do_ceph
|
||||
|
||||
lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status_out}")
|
||||
|
||||
|
217
functions.sh
Normal file
217
functions.sh
Normal file
@ -0,0 +1,217 @@
|
||||
#!/bin/bash
|
||||
|
||||
model=" -m cpe-jammy"
|
||||
LMA_SERVERS="on"
|
||||
|
||||
get_juju_status()
{
    # Capture one snapshot of juju status as JSON into a temp file and publish
    # its path via the global juju_status_out (read with jq by the other
    # helpers in this file).  NOTE(review): the temp file is never removed;
    # callers own the cleanup.
    juju_status_out=$(mktemp) || return 1   # bail out instead of redirecting to ""
    # juju_status is a command string built by check_juju_version (it may embed
    # "-m <model>"), so the unquoted expansion / word-splitting is intentional.
    ${juju_status} --format json > "${juju_status_out}"
}
|
||||
|
||||
check_juju_version()
{
    # Detect the installed juju client's major version and build the command
    # strings (juju_run, juju_run_action, juju_status, juju_ssh, juju_scp,
    # juju_config) that the rest of these scripts word-split and execute.
    # Juju 3.x renamed "run" -> "exec" and "run-action" -> "run".
    juju_version=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)

    juju_timeout="30s"

    if [[ ${juju_version} -ge 3 ]] ; then
        juju_run="juju exec --wait=${juju_timeout}"
        juju_run_action="juju run"
    else
        juju_run="juju run --timeout ${juju_timeout}"
        juju_run_action="juju run-action --wait"
    fi

    juju_status="juju status"
    juju_ssh="juju ssh"
    juju_scp="juju scp"
    juju_config="juju config"

    # Pin every command to the configured model, when one is set.  ${model}
    # already carries its leading " -m " prefix.
    if [[ -n ${model} ]] ; then
        local cmd_var
        for cmd_var in juju_run juju_run_action juju_status juju_ssh juju_scp juju_config ; do
            printf -v "${cmd_var}" '%s' "${!cmd_var}${model}"
        done
    fi
}
|
||||
|
||||
do_vault()
{
    # Unseal every vault unit using the first three unseal keys stored in
    # ${vault_file}.  Pass "restart" as $1 to restart the vault service on all
    # units first.  Relies on globals: juju_run, juju_config, juju_status_out.
    arg=${1}
    vault_file="vault-secrets.txt"

    if [[ ${arg} == "restart" ]] ; then
        ${juju_run} -a vault -- sudo systemctl restart vault
    fi

    vault_vip=$(${juju_config} vault vip)
    echo export VAULT_ADDR="http://${vault_vip}:8200"
    export VAULT_ADDR="http://${vault_vip}:8200"

    echo " "

    # Public address of every vault unit, from the cached juju status JSON.
    IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status_out}")

    for ip in $IPS;do
        echo export VAULT_ADDR=http://"${ip}":8200;
        export VAULT_ADDR=http://"${ip}":8200;
        # The first three lines of the secrets file hold the unseal keys in
        # column 4.  Use ${vault_file} — the original hard-coded the filename
        # here despite declaring the variable above.
        for vault_key in $(head -n3 "${vault_file}" | awk '{print $4}');do
            echo vault operator unseal -tls-skip-verify "$vault_key"
            vault operator unseal -tls-skip-verify "$vault_key"
        done;
    done;
}
|
||||
|
||||
do_ceph()
{
    # For every ceph-osd application in the cached juju status JSON: stop any
    # stuck vaultlocker-decrypt services, restart them, then kick off the
    # ceph-volume units (backgrounded, reaped by the final wait).
    ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status_out}")

    local osd_app
    for osd_app in ${ceph_osd_apps} ; do
        ${juju_run} -a "${osd_app}" -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
        ${juju_run} -a "${osd_app}" -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
        ${juju_run} -a "${osd_app}" -- 'sudo systemctl start --all --type=service ceph-volume@*' &
    done

    # Barrier: wait for all backgrounded ceph-volume starts before returning.
    wait
}
|
||||
|
||||
check_unit_status()
{
    # Echo 1 when at least 3 units of application $1 report a workload-status
    # message exactly equal to $2, else echo 0.
    # NOTE(review): the hard-coded threshold of 3 assumes a 3-unit (HA)
    # deployment — confirm before using on other cluster sizes.

    app_name=$1
    status_check="$2"

    unit_status=$(${juju_status} --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")

    app_units=$(echo "${unit_status}" | jq -r .sub)

    num=0
    for unit in ${app_units} ; do
        # Exact-match the unit name inside jq: the original grep was a
        # substring match, so "mysql/1" also matched "mysql/10" and could
        # read the wrong unit's status.
        this_unit_status=$(echo "${unit_status}" | jq -rc "select(.sub == \"${unit}\") | .status")
        if [[ "${this_unit_status}" == "${status_check}" ]] ; then
            (( num++ ))
        fi
    done

    if [[ $num -ge 3 ]] ; then echo 1
    else echo 0
    fi
}
|
||||
|
||||
get_lead()
{
    # Print the leader unit of application $1, read from the cached juju
    # status JSON in ${juju_status_out}.
    app_name=$1

    # Bug fix: the original jq program was single-quoted, so ${app_name} was
    # never expanded — and ".applications.${app_name}" is not valid jq anyway,
    # so the query always failed.  Expand the shell variable and quote the key.
    # juju emits "leader" as a JSON boolean; accept the string form too in
    # case an older/newer client serializes it differently.
    jq -rc ".applications.\"${app_name}\".units | to_entries[] | select(.value.leader == true or .value.leader == \"true\") | .key" "${juju_status_out}"
}
|
||||
|
||||
do_mysql_innodb_cluster()
{
    # Recover mysql-innodb-cluster after a full outage.  If no unit reports
    # "Mode: R/W, Cluster is ONLINE", ask the leader to reboot the cluster;
    # when the leader refuses (it is not the most up-to-date instance), parse
    # the action output for the instance to use and re-run the action there.
    mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")

    is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)

    # A unit already reports the cluster online: nothing to recover.
    if [[ -n "${is_ready}" ]] ; then
        return 0
    fi

    reboot_status=$(${juju_run_action} mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --format json)

    outcome=$(echo "$reboot_status" | jq .[].results.outcome)

    if [[ ${outcome} == null ]] ; then
        # No outcome: the leader was not the freshest instance.  Its output
        # names the one to use ("Please use the most up to date instance: …").
        output=$(echo "$reboot_status" | jq .[].results.output)

        mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")

        # Map that IP back to a unit name via the cached status JSON.
        bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status_out}")

        ${juju_run_action} "${bootstrap_unit}" reboot-cluster-from-complete-outage
    fi
}
|
||||
|
||||
do_percona_cluster()
{
# Recover a percona-cluster (PXC) "mysql" application after a full outage:
# pick the unit that is safe to bootstrap (or, failing that, the unit with the
# highest sequence number), run bootstrap-pxc on it, then notify-bootstrapped
# on another unit so the rest of the cluster rejoins.
# Relies on globals set elsewhere in this file: juju_status_out, juju_run,
# juju_run_action, juju_ssh (see check_juju_version / get_juju_status).
mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status_out}")

# Example of the per-unit JSON this parses:
#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
#{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}

mysql_units=$(echo "${mysql_status}" | jq -r .sub)
bootstrap_unit=""

mysql_lead=$(get_lead mysql)

# A unit reporting "Safe To Bootstrap: 1" can be bootstrapped directly.
safe_to_bootstrap=$(echo "$mysql_status" | jq -rc . | grep "Safe To Bootstrap: 1" | jq -r .sub)

if [[ -n "$safe_to_bootstrap" ]]
then

bootstrap_unit=$safe_to_bootstrap

else

# No unit is marked safe: fall back to comparing sequence numbers.
seq_number=$(echo "$mysql_status" | jq -rc . | grep "Sequence Number" )

if [[ -n "${seq_number}" ]]
then

# Extract {sub, seq} pairs; the sequence number is parsed out of the
# human-readable status message text.
seqs=$(echo "$seq_number" | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")

uniq_seqs=$(echo "$seqs" | jq -r .seq | sort -n | uniq)
seq_count=$(echo "$uniq_seqs" | xargs | wc -w)

highest_seq=$(echo "${seqs}" | jq -r .seq | sort -n | uniq | tail -n 1)

if [[ ${seq_count} -eq 1 ]]
then # same seq numbers all round
if [[ ${highest_seq} -eq -1 ]]
then # if all seq numbers are -1
# -1 means no unit knows a usable recovery position; needs manual recovery.
echo "The sequence number is -1 ... exiting"
exit 1
fi
bootstrap_unit=${mysql_lead}
else # we have different seq numbers

# Bootstrap from the unit holding the highest (most recent) sequence number.
unit_high_seq=$(echo "$seqs" | jq -rc . | grep "${highest_seq}" | jq -r .sub | tail -n 1)

bootstrap_unit=${unit_high_seq}
fi
fi
fi

if [[ -n ${bootstrap_unit} ]]
then
${juju_run_action} "${bootstrap_unit}" bootstrap-pxc
${juju_run} -a mysql "hooks/update-status"
# Poll until the remaining units report they are waiting for the bootstrap.
until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
do
sleep 10
done
# notify-bootstrapped must run on a unit OTHER than the bootstrapped one:
# use any non-leader unit if the leader was bootstrapped, else the leader.
if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
for unit in ${mysql_units}; do
if [[ "${unit}" != "${mysql_lead}" ]] ; then
${juju_run_action} "${unit}" notify-bootstrapped
break
fi
done
else
${juju_run_action} "${mysql_lead}" notify-bootstrapped
fi
${juju_run} -a mysql "hooks/update-status"
# Poll until the units settle into "Unit is ready".
until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
do
sleep 10
done
# This is so that nagios doesn't report that the mysql daemon is down
# although the process is running. juju will then automatically start
# the mysqld process
${juju_ssh} "${bootstrap_unit}" -- sudo reboot
fi

${juju_run} -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
}
|
3
get_ca_cert.sh
Normal file
3
get_ca_cert.sh
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
# Fetch the root CA certificate from the vault leader unit and save it
# to ./root_ca.cert.

juju run vault/leader get-root-ca --format json \
    | jq -rc ".[].results.output" > root_ca.cert
|
13
restart_juju_nojuju.sh
Executable file
13
restart_juju_nojuju.sh
Executable file
@ -0,0 +1,13 @@
|
||||
#!/bin/bash
# Restart the jujud machine agent on a juju controller machine over plain ssh
# (works even when the juju API itself is down, hence "nojuju").
# Usage: restart_juju_nojuju.sh [api-endpoint-index] [controller-name]

machine=${1:-0}
model=${2:-foundation-maas}

# The commented variant needed a live controller connection; read the endpoint
# from the local client config instead (no useless cat — yq takes a filename).
#host=$(juju show-controller ${model} --format json | jq -rc '."'${model}'".details."api-endpoints"['$machine']' | awk -F: '{print $1}')
host=$(yq '.controllers."'"${model}"'"."api-endpoints"['"${machine}"']' ~/.local/share/juju/controllers.yaml | awk -F: '{print $1}')

cmds="sudo systemctl restart jujud-machine-\*"

ssh_key=$HOME/.local/share/juju/ssh/juju_id_rsa

# IdentityAgent=none: force the juju ssh key and ignore any agent identities,
# so the connection is not rejected after too many offered keys.
ssh -o IdentityAgent=none -l ubuntu -i "${ssh_key}" "${host}" "${cmds}"
|
@ -1,18 +1,18 @@
|
||||
Unseal Key 1: BXyPvDeMDzfzarrN2Gmtl0YBuCuhrSllhYindRR2Sdle
|
||||
Unseal Key 2: EhN9f3J1+FdawXsApoUNT3MlSMTk7zCBBWteuYj6azgf
|
||||
Unseal Key 3: XGTCoW5eMSykc+Gn1A01gTtM2wxpIi9i5hTydFni+AQJ
|
||||
Unseal Key 4: NcfyD1ORFugYg8wt7AXvEFpNlMUiPrxjDoRR359UbD8Y
|
||||
Unseal Key 5: UJMEzTKKVpNMtlhTDzOzwesBfI/Yfo5pfAViHnDvtZ3D
|
||||
Unseal Key 1: RtovEr6q8xNSaCImqaWpp002PphC+o/AwZm9sOmbK1Vg
|
||||
Unseal Key 2: teEUylJ1EzNRB4ZJuUTwFaK4BA3xOt9KUhcSIVKXi0TF
|
||||
Unseal Key 3: 8wZmMJJ6TqCjB5/RDXbqJC6t0+MHJBjm+CdIu0db59eV
|
||||
Unseal Key 4: mYNQzPMnxNRqGwFylLgLsFD2eeL1GmAD6y75SjTsaXUm
|
||||
Unseal Key 5: kPBl+CC6HGNXho3XrBAvl4gnYU381P5wZ9SnurenUKTU
|
||||
|
||||
Initial Root Token: hvs.tyA2bCvimuDETIFyeDw2rqU5
|
||||
Initial Root Token: s.GqPbY2ceIiaxiuMKj3QserDL
|
||||
|
||||
Vault initialized with 5 key shares and a key threshold of 3. Please securely
|
||||
distribute the key shares printed above. When the Vault is re-sealed,
|
||||
restarted, or stopped, you must supply at least 3 of these keys to unseal it
|
||||
before it can start servicing requests.
|
||||
|
||||
Vault does not store the generated master key. Without at least 3 key to
|
||||
reconstruct the master key, Vault will remain permanently sealed!
|
||||
Vault does not store the generated root key. Without at least 3 keys to
|
||||
reconstruct the root key, Vault will remain permanently sealed!
|
||||
|
||||
It is possible to generate new unseal keys, provided you have a quorum of
|
||||
existing unseal keys shares. See "vault operator rekey" for more information.
|
||||
|
19
vault.sh
19
vault.sh
@ -1,19 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
vault_file="vault-secrets.txt"
|
||||
args="$@"
|
||||
|
||||
juju run -a vault -- sudo systemctl restart vault
|
||||
|
||||
IPS=$(juju status --format json | jq -r '.applications.vault.units | to_entries[].value."public-address"')
|
||||
|
||||
for ip in $IPS
|
||||
do
|
||||
echo export VAULT_ADDR=http://${ip}:8200;
|
||||
export VAULT_ADDR=http://${ip}:8200;
|
||||
for vault_key in $(head -n3 ${vault_file} | awk '{print $4}');do
|
||||
echo vault operator unseal -tls-skip-verify $vault_key
|
||||
vault operator unseal -tls-skip-verify $vault_key
|
||||
done
|
||||
done
|
||||
. functions.sh
|
||||
check_juju_version
|
||||
|
||||
get_juju_status
|
||||
|
||||
do_vault ${args}
|
||||
|
@ -1,24 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
. functions.sh
|
||||
check_juju_version
|
||||
|
||||
get_juju_status
|
||||
|
||||
vault_file="vault-secrets.txt"
|
||||
vault_token_file="vault-token.txt"
|
||||
|
||||
vault_vip=$(juju config vault vip)
|
||||
|
||||
export VAULT_ADDR="http://${vault_vip}:8200"
|
||||
|
||||
vault operator init -key-shares=5 -key-threshold=3 > ${vault_file}
|
||||
|
||||
IPS=$(juju status vault --format json | jq -r '.applications.vault.units | to_entries[].value."public-address"')
|
||||
|
||||
for ip in $IPS;do
|
||||
echo export VAULT_ADDR=http://${ip}:8200;
|
||||
export VAULT_ADDR=http://${ip}:8200;
|
||||
for vault_key in $(head -n3 ${vault_file} | awk '{print $4}');do
|
||||
echo vault operator unseal -tls-skip-verify $vault_key
|
||||
vault operator unseal -tls-skip-verify $vault_key
|
||||
done;
|
||||
done;
|
||||
do_vault
|
||||
|
||||
initial_token=$(grep Initial ${vault_file} | awk '{print $4}')
|
||||
|
||||
@ -27,6 +19,6 @@ export VAULT_TOKEN=${initial_token}
|
||||
|
||||
vault token create -ttl=10m > ${vault_token_file}
|
||||
|
||||
token=$(cat ${vault_token_file} | grep token | head -n 1 | awk '{print $2}')
|
||||
token=$(grep token ${vault_token_file} | head -n 1 | awk '{print $2}')
|
||||
|
||||
juju run-action --wait vault/leader authorize-charm token=${token}
|
||||
${juju_run_action} vault/leader authorize-charm token=${token}
|
||||
|
Loading…
Reference in New Issue
Block a user