many modifications

* Add script to check InnoDB status
* Add check_mongo script for when Juju controllers are not working
* Add script to fix group replication
* Add script that fixes logrotate on nova-compute units
* Fine-tune other scripts
Arif Ali 2023-02-22 14:16:27 +00:00
parent 24c20055f7
commit ab9f68b8e5
Signed by: arif
GPG Key ID: 369608FBA1353A70
8 changed files with 113 additions and 15 deletions

16
check_innodb.sh Executable file

@@ -0,0 +1,16 @@
#!/bin/bash
check_controller()
{
controller=$1
model="cpe-focal"
juju run-action -m "${controller}":${model} mysql-innodb-cluster/leader --wait cluster-status --format json | jq -rc '.[].results."cluster-status"' | jq
}
if [[ -z "$1" ]] ; then
check_controller "$(juju controllers --format json | jq -r .\"current-controller\")"
else
check_controller "${1}"
fi
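For context, check_innodb.sh runs the mysql-innodb-cluster charm's cluster-status action on the leader unit in the hard-coded cpe-focal model and pretty-prints the JSON result. A minimal usage sketch follows; it is not part of the commit and the controller name is a placeholder:
# Check the InnoDB cluster on the current controller's cpe-focal model
./check_innodb.sh
# Check a specific controller (controller name is illustrative)
./check_innodb.sh my-controller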

View File

@@ -5,13 +5,13 @@ check_controller()
controller=$1
model="cpe-focal"
juju status -m ${controller}:${model} --color | grep ^Unit -A 999999 | egrep -v "started.*focal|started.*bionic|active.*idle"
juju status -m "${controller}":${model} --color | grep ^Unit -A 999999 | grep -E -v "started.*focal|started.*bionic|active.*idle"
}
if [[ -z "$1" ]] ; then
check_controller $(juju controllers --format json | jq -r .\"current-controller\")
check_controller "$(juju controllers --format json | jq -r .\"current-controller\")"
else
check_controller $1
check_controller "${1}"
fi

25
check_mongo_status_nojuju.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/bash
machine=${1:-0}
model=${2:-foundation-maas}
#host=$(juju show-controller ${model} --format json | jq -rc '."'${model}'".details."api-endpoints"['$machine']' | awk -F: '{print $1}')
host=$(cat ~/.local/share/juju/controllers.yaml | yq '.controllers."'${model}'"."api-endpoints"['$machine']' | awk -F: '{print $1}')
read -d '' -r cmds <<'EOF'
user=$(sudo ls /var/lib/juju/agents/ | grep machine)
conf=/var/lib/juju/agents/${user}/agent.conf
password=$(sudo grep statepassword ${conf} | cut -d' ' -f2)
if [ -f /usr/lib/juju/mongo*/bin/mongo ]; then
client=/usr/lib/juju/mongo*/bin/mongo
elif [ -f /usr/bin/mongo ]; then
client=/usr/bin/mongo
else
client=/snap/bin/juju-db.mongo
fi
${client} 127.0.0.1:37017/juju --authenticationDatabase admin --ssl --sslAllowInvalidCertificates --username "${user}" --password "${password}" --eval "rs.status()" | grep -P '(name|stateStr)'
EOF
ssh_key=$HOME/.local/share/juju/ssh/juju_id_rsa
ssh -l ubuntu -i ${ssh_key} ${host} "${cmds}"
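check_mongo_status_nojuju.sh resolves a controller machine's address from ~/.local/share/juju/controllers.yaml with yq, SSHes straight to it, and queries the juju-db MongoDB replica set using the machine agent's own credentials, so it works even when the Juju API is unreachable. A usage sketch (not part of the commit), assuming the script's defaults of machine 0 and the foundation-maas controller; the names in the second call are placeholders:
# Defaults: controller machine 0 of the foundation-maas controller
./check_mongo_status_nojuju.sh
# Explicit machine index and controller name (illustrative values)
./check_mongo_status_nojuju.sh 1 my-controller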

21
do_group_replication.sh Executable file

@@ -0,0 +1,21 @@
#!/bin/bash
NODE=$1
usage() {
echo "Usage: ${0} [node-name]" 1>&2
exit 0
}
do_gr(){
MYSQL_UNIT=${NODE}
PASSWORD=$(juju run --unit mysql-innodb-cluster/leader leader-get mysql.passwd)
juju ssh $MYSQL_UNIT "sudo mysql -u root -p$PASSWORD -e \"stop group_replication; start group_replication;\""
}
if [ $# -ne 1 ]; then
usage
else
do_gr
fi
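do_group_replication.sh fetches the MySQL root password from the mysql-innodb-cluster leader (leader-get mysql.passwd) and then stops and restarts group replication on the named unit over juju ssh. A usage sketch, with an illustrative unit name:
# Restart group replication on one unit (unit name is an example)
./do_group_replication.sh mysql-innodb-cluster/1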

View File

@@ -6,15 +6,19 @@
# This script is required after a reboot of the cloud after the cloud has been
# shut down
LMA_SERVERS="off"
check_juju_version()
{
juju_version=$(juju version | cut -d'-' -f1 | cut -d'.' -f1)
juju_run="juju run"
juju_timeout="--timeout 30s"
juju_run="juju run ${juju_timeout}"
juju_run_action="juju run-action --wait"
if [[ ${juju_version} -ge 3 ]] ; then
juju_run="juju exec"
juju_run="juju exec ${juju_timeout}"
juju_run_action="juju run --wait"
fi
}
@@ -219,9 +223,9 @@ ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-nam
for apps in ${ceph_osd_apps}
do
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
${juju_run} -a "${apps}" -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
${juju_run} -a "${apps}" -- 'sudo systemctl start --all --type=service ceph-volume@*' &
done
wait
@@ -242,3 +246,5 @@ do
done
wait
[[ $LMA_SERVERS == "off" ]] && ${juju_run} -a lma-server -- sudo halt -p
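The change above folds the per-call --timeout 30s flag into the ${juju_run} wrapper, so the ceph-osd loop no longer repeats it, and the same wrapper covers Juju 2.x (juju run) and Juju 3.x (juju exec). A sketch of how a call site now expands; the application name ceph-osd is an assumption, not taken from the diff:
# With Juju 2.x the wrapper expands to "juju run --timeout 30s";
# with Juju 3.x it would be "juju exec --timeout 30s" instead.
juju_timeout="--timeout 30s"
juju_run="juju run ${juju_timeout}"
# Example call site (application name is an assumption)
${juju_run} -a ceph-osd -- 'sudo systemctl start --all --type=service ceph-volume@*'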

View File

@@ -9,5 +9,5 @@ juju status --format json > $juju_status
#cat ${juju_status} | jq -rc '.machines | to_entries[] | select(.value.containers != null ) | .value.containers | to_entries[] | [.key,.value.hostname,.value."ip-addresses"]'
# new method
cat ${juju_status} | jq -rc '.machines | to_entries[] |[.key,.value.hostname,.value."ip-addresses", [(select(.value.containers != null ) | .value.containers | to_entries[] | [.key,.value.hostname,.value."ip-addresses"])]]'
jq -rc '.machines | to_entries[] |[.key,.value.hostname,.value."ip-addresses", [(.value.containers//empty | to_entries[] | [.key,.value.hostname,.value."ip-addresses"])]]' ${juju_status}
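The rewritten jq pipeline drops the needless cat and replaces the select() on a null containers key with jq's //empty alternative operator; machines without containers still get an empty inner array. The same expression can also be run against live status output, as in this sketch (not part of the commit):
# One-off equivalent without the temporary file
juju status --format json | jq -rc '.machines | to_entries[] | [.key, .value.hostname, .value."ip-addresses", [(.value.containers//empty | to_entries[] | [.key, .value.hostname, .value."ip-addresses"])]]'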

30
logrotate_cis_fix.sh Normal file

@@ -0,0 +1,30 @@
#!/bin/bash
cat > /tmp/fix_logs.sh << EOF
#!/bin/bash
sudo chown syslog:adm /var/log/syslog /var/log/auth.log /var/log/dpkg.log /var/log/kern.log /var/log/mail.log /var/log/lastlog /var/log/ubuntu-advantage.log /var/log/haproxy.log
sudo sed -i 's/create 0640 root utmp/create 0640/g' /etc/logrotate.conf
sudo systemctl restart logrotate.timer
EOF
juju_status=$(mktemp)
juju status --format json > "${juju_status}"
timeout="--timeout 30s"
nova_compute_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"nova-compute\") | .key" "${juju_status}")
for app in ${nova_compute_apps}
do
nova_compute_units=$(jq -r ".applications.\"${app}\".units | keys[]" "${juju_status}")
for unit in ${nova_compute_units}
do
juju scp /tmp/fix_logs.sh ${unit}:fix_logs.sh
done
juju run ${timeout} -a ${app} -- 'bash fix_logs.sh'
done
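logrotate_cis_fix.sh writes a small remediation script to /tmp that fixes log file ownership and the logrotate create directive, copies it to every unit of each application whose charm is nova-compute, and runs it application-wide with a 30-second timeout. A usage sketch with an optional spot check; the application name nova-compute in the check is an assumption about the deployment:
# Run from a host with the target model selected
./logrotate_cis_fix.sh
# Optional spot check of the logrotate create directive afterwards
juju run --timeout 30s -a nova-compute -- 'grep "create 0640" /etc/logrotate.conf'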

View File

@@ -1,10 +1,10 @@
Unseal Key 1: 9dx2PJRhjffPIVDseRc0CJs+VFcAEKb7+aGoUkh0WNqK
Unseal Key 2: Ls2vbOvY/fKYk9B45iPpBV068z4CaWRgQAzwvJRef5bV
Unseal Key 3: dYa88MlsHCWakJF8s+Fe/IvXmVRO1Druk/7h3DPP7WE1
Unseal Key 4: wHTcIPrmJDF8GCFNzSRefrswjrxq+atTqvdl3QVhbabN
Unseal Key 5: C6DFXcr3I8W/aKupiktrYo0/VenuVs2jzzvrCMqI410l
Unseal Key 1: rDjUVcpsiwv3SDmAAavfkMBJ2O2H9XfsmMXOXaI46zT0
Unseal Key 2: ww7AgnhYINM7BpD1v2LHyBsPn10CWT2AnPm5KfCN48Ve
Unseal Key 3: jH5WxSmY0I+InM0W4n4L9H9jKXW1BLPGFLCMfVTF8tFb
Unseal Key 4: qfxj3ejTGD1tawonOjXRG+qdIFNZHSsRJIUqp6x2PLHw
Unseal Key 5: HGT+smfQdbGKQsHa+Aci1x3u2RPjNQZHJ8ouUPNXPpvC
Initial Root Token: hvs.DxM0VoLHc8f3vZ4csTVK21vU
Initial Root Token: hvs.iLynNrOHmG74IT29P6zMnShH
Vault initialized with 5 key shares and a key threshold of 3. Please securely
distribute the key shares printed above. When the Vault is re-sealed,