From d4fa9eb47f12881ee7842c1c2a4e2fafd58056c3 Mon Sep 17 00:00:00 2001 From: Arif Ali Date: Tue, 12 Oct 2021 20:24:31 +0100 Subject: [PATCH] Initial commit --- check_mongo.sh | 19 +++++ check_queues.sh | 36 ++++++++ debug-relations | 20 +++++ fix_cloud.sh | 172 +++++++++++++++++++++++++++++++++++++++ get_all_ips.sh | 9 ++ get_all_relation_info.sh | 25 ++++++ get_charm_versions.sh | 32 ++++++++ get_passwords.sh | 13 +++ grab_vips.sh | 12 +++ reset.sh | 18 ++++ vault.sh | 21 +++++ 11 files changed, 377 insertions(+) create mode 100755 check_mongo.sh create mode 100755 check_queues.sh create mode 100755 debug-relations create mode 100755 fix_cloud.sh create mode 100755 get_all_ips.sh create mode 100755 get_all_relation_info.sh create mode 100755 get_charm_versions.sh create mode 100755 get_passwords.sh create mode 100755 grab_vips.sh create mode 100755 reset.sh create mode 100755 vault.sh diff --git a/check_mongo.sh b/check_mongo.sh new file mode 100755 index 0000000..b02563a --- /dev/null +++ b/check_mongo.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +machine=${1:-0} +model=${2:-controller} + +read -d '' -r cmds <<'EOF' +conf=/var/lib/juju/agents/machine-*/agent.conf +user=`sudo grep tag $conf | cut -d' ' -f2` +password=`sudo grep statepassword $conf | cut -d' ' -f2` +if [ -f /usr/lib/juju/mongo*/bin/mongo ]; then + client=/usr/lib/juju/mongo*/bin/mongo +else + client=/usr/bin/mongo +fi +$client 127.0.0.1:37017/juju --authenticationDatabase admin --ssl --sslAllowInvalidCertificates --username "$user" --password "$password" +EOF + +juju ssh -m $model $machine "$cmds" + diff --git a/check_queues.sh b/check_queues.sh new file mode 100755 index 0000000..0698729 --- /dev/null +++ b/check_queues.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +check_controller() +{ + controller=$1 + model=$2 + + [[ -z "${model}" ]] && model="$(juju controllers --format json | jq -rc ".controllers | to_entries[] | select(.key == \"$controller\") | .value[\"current-model\"]")" + + echo ${controller}: + echo + + 
juju run -m ${controller}:${model} --unit rabbitmq-server/leader -- \ + sudo rabbitmqctl list_queues -p openstack pid | sed -e 's/<\([^.]*\).*>/\1/' | sort | uniq -c + +} + +if [[ -z "$1" ]] ; then + + controllers=$(juju controllers --format json | jq -rc ".controllers | to_entries[] | {controller:.key,model:.value[\"current-model\"]}") + + for controller_json in ${controllers} + do + controller=$(echo $controller_json | jq .controller | sed s/\"//g) + model=$(echo $controller_json | jq .model | sed s/\"//g) + check_controller ${controller} ${model} + done + +else + + model="" + [[ -n "$2" ]] && model=$2 + + check_controller $1 $model + +fi diff --git a/debug-relations b/debug-relations new file mode 100755 index 0000000..79ab3e2 --- /dev/null +++ b/debug-relations @@ -0,0 +1,20 @@ +#!/bin/sh +if [ $# -ne 2 ]; then + echo "Usage: $0 unit/N relation-name (e.g. $0 ceph-mon/0 client)" + exit 1 +fi + +relation_ids=$(juju run --unit $1 -- relation-ids $2) +echo $relation_ids + +for relation_id in $relation_ids; do + units=$(juju run --unit $1 -- relation-list -r $relation_id) + for unit in $units; do + echo ----- + echo from $1 get $relation_id $unit + juju run --unit $1 -- relation-get -r $relation_id - $unit + done & +done + +wait + diff --git a/fix_cloud.sh b/fix_cloud.sh new file mode 100755 index 0000000..f1f8e75 --- /dev/null +++ b/fix_cloud.sh @@ -0,0 +1,172 @@ +#!/bin/bash + +set -ax + +# This script is required after a reboot of the cloud after the cloud has been +# shut down + +check_unit_status() +{ + + app_name=$1 + status_check="$2" + + unit_status=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}") + + app_units=$(echo ${unit_status} | jq .sub | sed s/\"//g) + + num=0 + for unit in ${app_units} ; do + this_unit_status=$(echo $unit_status | jq -rc . 
| grep ${unit} | jq .status | sed s/\"//g) + if [[ "${this_unit_status}" == "${status_check}" ]] ; then + (( num++ )) + fi + done + + if [[ $num -ge 3 ]] ; then echo 1 + else echo 0 + fi +} + +get_lead() +{ + app_name=$1 + units=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | .key") + + for unit in ${units} ; do + is_leader=$(juju run --unit ${unit} "is-leader") + [[ "${is_leader}" == "True" ]] && unit_lead=${unit} && break + done + + echo $unit_lead +} + + +juju-wait -v + +mysql_status=$(juju status --format json | jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}") + +#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"} +#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"} +#{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"} + +mysql_units=$(echo ${mysql_status} | jq .sub | sed s/\"//g) +bootstrap_unit="" + +mysql_lead=$(get_lead mysql) + +safe_to_bootstrap=$(echo $mysql_status | jq -rc . | grep "Safe To Bootstrap: 1" | jq .sub | sed s/\"//g) + +if [[ -n "$safe_to_bootstrap" ]] +then + + bootstrap_unit=$safe_to_bootstrap + +else + + #seq_number=$(echo $mysql_status | jq -rc . | grep "Sequence Number" | jq .status | sed s/\"//g) + seq_number=$(echo $mysql_status | jq -rc . | grep "Sequence Number" ) + + if [[ -n "${seq_number}" ]] + then + + seqs=$(echo $seq_number | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}") + + uniq_seqs=$(echo $seqs| jq .seq | sed s/\"//g | sort -n | uniq) + seq_count=$(echo $uniq_seqs | xargs | wc -w) + + if [[ ${seq_count} -eq 1 ]] + then # same seq numbers all round + bootstrap_unit=${mysql_lead} + else # we have different seq numbers + + highest_seq=$(echo $seqs| jq .seq | sed s/\"//g | sort -n | uniq | tail -n 1) + unit_high_seq=$(echo $seqs | jq -rc . 
| grep ${highest_seq} | jq .sub | sed s/\"//g) + + bootstrap_unit=${unit_high_seq} + fi + fi +fi + +if [[ -n ${bootstrap_unit} ]] +then + juju run-action --wait ${bootstrap_unit} bootstrap-pxc + juju run --application mysql "hooks/update-status" + until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]] + do + sleep 10 + done + if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then + for unit in ${mysql_units}; do + if [[ "${unit}" != "${mysql_lead}" ]] ; then + juju run-action --wait ${unit} notify-bootstrapped + ran_bootstrap="true" + break + fi + done + else + juju run-action --wait ${mysql_lead} notify-bootstrapped + ran_bootstrap="true" + fi + juju run -a mysql "hooks/update-status" + until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]] + do + sleep 10 + done + # This is so that nagios doesn't report that the mysql daemon is down + # although the process is running. juju will then automatically start + # the mysqld process + juju run --timeout 30s --unit ${bootstrap_unit} -- sudo reboot +fi + +juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth +juju run -a heat -- sudo systemctl restart heat-engine +juju run -a vault -- sudo systemctl restart vault + +juju run -a nova-cloud-controller "hooks/update-status" +juju run -a heat "hooks/update-status" + +# cleanup all crm resources +juju status --format json | jq ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" | sed s/\"//g | xargs -i juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"' + +cd ~/stsstack-bundles/openstack/arif/ +./vault.sh + +juju run -a vault "hooks/update-status" + +# Wait 10 seconds, and ensure that vault is unsealed +echo "Sleeping 10 seconds to wait for vault to finalise unseal" +sleep 10 + +ceph_osds=$(juju status ceph-osd --format json | jq -rc ". 
| .applications[\"ceph-osd\"].units | to_entries[] | .key") + +for ceph_osd in ${ceph_osds} +do + osds=$(juju ssh ${ceph_osd} -- sudo ceph-volume lvm list --format json | jq -rc ". | to_entries[] | {id:.key,key:.value[].tags[\"ceph.osd_fsid\"]}") + for osd in ${osds}; do + osd_id=$(echo $osd | jq .id | sed s/\"//g) + uuid=$(echo $osd | jq .key | sed s/\"//g) + juju ssh ${ceph_osd} -- sudo systemctl restart ceph-volume@lvm-${osd_id}-${uuid} + done +done + +juju run -a ceph-osd "hooks/update-status" + +lds_servers=$(juju status landscape-server --format json | jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key") + +cat > /tmp/restart-landscape.sh << EOF +#!/bin/bash + +sudo systemctl restart landscape-* +EOF + +for lds_server in ${lds_servers} +do + juju scp /tmp/restart-landscape.sh ${lds_server}:. + juju ssh ${lds_server} chmod +x restart-landscape.sh + juju ssh ${lds_server} sudo ./restart-landscape.sh & +done + +wait + +juju run --all -- sudo systemctl restart systemd-resolved diff --git a/get_all_ips.sh b/get_all_ips.sh new file mode 100755 index 0000000..5ae7f05 --- /dev/null +++ b/get_all_ips.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +juju_status=$(mktemp) + +juju status --format json > $juju_status + +cat ${juju_status} | jq -rc '.machines | to_entries[] |[.key,.value.hostname,.value."ip-addresses"]' +cat ${juju_status} | jq -rc '.machines | to_entries[] | select(.value.containers != null ) | .value.containers | to_entries[] | [.key,.value.hostname,.value."ip-addresses"]' + diff --git a/get_all_relation_info.sh b/get_all_relation_info.sh new file mode 100755 index 0000000..411924e --- /dev/null +++ b/get_all_relation_info.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# JP on Feb 11 2021 +# Usage: ./get_all_relations_info.sh +# Example: ./get_all_relations_info.sh mysql # This will default to "mysql/0" +# Example: ./get_all_relations_info.sh keystone/1 + +APP=`echo ${1} | awk -F\/+ '{print $1}'` +UNIT=`echo ${1} | awk -F\/+ '{print $2}'` + +[ -z "$UNIT" 
] && UNIT=0 + +for r in `juju show-application ${APP} | grep endpoint-bindings -A999 | tail -n +3 | awk -F\: '{print $1}' | sort` +do + for i in `juju run --unit ${APP}/${UNIT} "relation-ids ${r}" | awk -F\: '{print $2}' | sort` + do + echo "===========================================" + echo "RELATION INFO FOR ${APP}/${UNIT} - ${r}:${i}" + echo "" + juju run --unit ${APP}/${UNIT} "relation-get -r ${r}:${i} - ${APP}/${UNIT}" + echo "===========================================" + done +done + +exit 0 diff --git a/get_charm_versions.sh b/get_charm_versions.sh new file mode 100755 index 0000000..cc1285e --- /dev/null +++ b/get_charm_versions.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# +# The purpose of this script is to grab the versions of the charms being used +# on a system that is controlled by juju. The script will output the data in +# CSV format, which can easily be added to a spreadsheet or other applications +# for understanding of current revisions of charms. +# +# Authors: +# - Arif Ali +# + +apps=$(juju status --format json | jq -rc ".applications | to_entries[] | {charm_name:.value[\"charm-name\"],charm:.value.charm,version:.value[\"charm-rev\"]}") + +for app in $apps +do + # each line will look similar to the one below + # + # {"charm_name":"openstack-service-checks","charm":"cs:~canonical-bootstack/openstack-service-checks-30","version":30} + + app_name=$(echo $app | jq .charm_name | sed s/\"//g) + charm=$(echo $app | jq .charm | sed s/\"//g) + charm_version=$(echo $app | jq .version) + + echo -n "${app_name}," + if [[ $charm =~ ^cs:~ ]] ; then + echo -n $charm | sed "s/^cs:~\(.*\)\/${app_name}-.*/\1,/g" + else + echo -n "," + fi + echo ${charm_version} +done | sort | uniq diff --git a/get_passwords.sh b/get_passwords.sh new file mode 100755 index 0000000..310ce16 --- /dev/null +++ b/get_passwords.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +keystone_passwd=$(juju run --unit keystone/leader 'leader-get admin_passwd') +nagios_passwd=$(juju run --unit nagios/leader 
'sudo cat /var/lib/juju/nagios.passwd') +grafana_passwd=$(juju run-action --wait grafana/leader get-admin-password | grep password | awk '{print $2}') +graylog_passwd=$(juju run-action --wait graylog/leader show-admin-password | grep admin-password | awk '{print $2}') +mysql_passwd=$(juju run --unit mysql/leader 'leader-get root-password') + +echo "Keystone admin password: ... ${keystone_passwd}" +echo "nagios password: ... ${nagios_passwd}" +echo "grafana password: ... ${grafana_passwd}" +echo "graylog password: ... ${graylog_passwd}" +echo "mysql password: ... ${mysql_passwd}" diff --git a/grab_vips.sh b/grab_vips.sh new file mode 100755 index 0000000..8debb70 --- /dev/null +++ b/grab_vips.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +applications=$(juju status --format json | jq ".applications | to_entries[] | .key" | sed s/\"//g) + +for app in ${applications} +do + vip=$(juju config $app --format json | jq ".settings.vip.value" | sed s/\"//g) + if [[ $vip != "null" ]] ; then + echo "${app}: ${vip}" + fi +done + diff --git a/reset.sh b/reset.sh new file mode 100755 index 0000000..951160d --- /dev/null +++ b/reset.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +export DEBIAN_FRONTEND=noninteractive +export APT_LISTCHANGES_FRONTEND=none + +hold_apps="openssh-server snapd salt-minion byobu rsync ubuntu-release-upgrader-core git" + +sudo -E apt-get update +sudo -E apt-get -y install aptitude ubuntu-minimal +sudo -E aptitude markauto '~i!~nubuntu-minimal' +sudo -E apt-mark hold ${hold_apps} +sudo -E apt-get -yq autoremove +dpkg -l | grep ^rc | awk '{print $2}' | xargs -i sudo -E dpkg --force-all -P "{}" +sudo -E apt-mark unhold ${hold_apps} +sudo -E apt-get -y install ${hold_apps} +sudo -E apt-get update +sudo -E apt-get -y upgrade +sudo -E salt-call state.highstate diff --git a/vault.sh b/vault.sh new file mode 100755 index 0000000..1b94fbd --- /dev/null +++ b/vault.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +vault_vip=$(juju config vault vip) +echo export VAULT_ADDR="http://${vault_vip}:8200" 
+export VAULT_ADDR="http://${vault_vip}:8200" + +echo " " + +IPS=$(juju status vault --format json | jq '.applications.vault.units | to_entries[] | .value."public-address"' | sed s/\"//g) + +for ip in $IPS +do + echo export VAULT_ADDR=http://${ip}:8200; + export VAULT_ADDR=http://${ip}:8200; + for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}');do + echo vault operator unseal -tls-skip-verify $vault_key + vault operator unseal -tls-skip-verify $vault_key + done +done + +