Many Updates

* Combine bionic and focal fix_cloud scripts
* Add latest vault keys for my lab
* Add new shutdown script for my lab envs
This commit is contained in:
parent
ae49084c17
commit
4ee03a9887
2 .gitignore (vendored)
@@ -1 +1,3 @@
 vault-token.txt
+controller_cert.crt
+license.txt
193 fix_cloud.sh
@@ -14,11 +14,11 @@ check_unit_status()

     unit_status=$(juju status --format json | jq -rc ".applications.${app_name}.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")

-    app_units=$(echo ${unit_status} | jq .sub | sed s/\"//g)
+    app_units=$(echo "${unit_status}" | jq -r .sub)

     num=0
     for unit in ${app_units} ; do
-        this_unit_status=$(echo $unit_status | jq -rc . | grep ${unit} | jq .status | sed s/\"//g)
+        this_unit_status=$(echo "$unit_status" | jq -rc . | grep "${unit}" | jq -r .status)
         if [[ "${this_unit_status}" == "${status_check}" ]] ; then
             (( num++ ))
         fi
@@ -33,7 +33,7 @@ get_lead()
 {
     app_name=$1

-    cat ${juju_status} | jq -rc ".applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key"
+    jq -rc ".applications.${app_name}.units | to_entries[] | select(.value.leader == "true") | .key" "${juju_status}"
 }

 do_vault()
@@ -44,14 +44,14 @@ do_vault()

     echo " "

-    IPS=$(cat ${juju_status} | jq '.applications.vault.units | to_entries[] | .value."public-address"' | sed s/\"//g)
+    IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status}")

     for ip in $IPS;do
-        echo export VAULT_ADDR=http://${ip}:8200;
-        export VAULT_ADDR=http://${ip}:8200;
+        echo export VAULT_ADDR=http://"${ip}":8200;
+        export VAULT_ADDR=http://"${ip}":8200;
         for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}');do
-            echo vault operator unseal -tls-skip-verify $vault_key
-            vault operator unseal -tls-skip-verify $vault_key
+            echo vault operator unseal -tls-skip-verify "$vault_key"
+            vault operator unseal -tls-skip-verify "$vault_key"
         done;
     done;
@@ -62,61 +62,66 @@ juju-wait -v

 juju_status=$(mktemp)

-juju status --format json > ${juju_status}
+juju status --format json > "${juju_status}"

-mysql_status=$(cat ${juju_status} | jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}")
+# Check if we're using percona-cluster or mysql-innodb-cluster
+percona_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"percona-cluster\") | .key" "${juju_status}")
+mysql_innodb_cluster=$(jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-innodb-cluster\") | .key" "${juju_status}")

-#{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
-#{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
-#{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
+if [[ -n "${percona_cluster}" ]] ; then

-mysql_units=$(echo ${mysql_status} | jq .sub | sed s/\"//g)
-bootstrap_unit=""
+    mysql_status=$(jq -rc ".applications.mysql.units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")

-mysql_lead=$(get_lead mysql)
+    #{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 1"}
+    #{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
+    #{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}

-safe_to_bootstrap=$(echo $mysql_status | jq -rc . | grep "Safe To Bootstrap: 1" | jq .sub | sed s/\"//g)
+    mysql_units=$(echo "${mysql_status}" | jq -r .sub)
+    bootstrap_unit=""

-if [[ -n "$safe_to_bootstrap" ]]
-then
+    mysql_lead=$(get_lead mysql)

-    bootstrap_unit=$safe_to_bootstrap
+    safe_to_bootstrap=$(echo "$mysql_status" | jq -rc . | grep "Safe To Bootstrap: 1" | jq -r .sub)

-else
+    if [[ -n "$safe_to_bootstrap" ]]
+    then

-    seq_number=$(echo $mysql_status | jq -rc . | grep "Sequence Number" )
+        bootstrap_unit=$safe_to_bootstrap

-    if [[ -n "${seq_number}" ]]
+    else

+        seq_number=$(echo "$mysql_status" | jq -rc . | grep "Sequence Number" )

+        if [[ -n "${seq_number}" ]]
         then

+            seqs=$(echo "$seq_number" | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")

+            uniq_seqs=$(echo "$seqs" | jq -r .seq | sort -n | uniq)
+            seq_count=$(echo "$uniq_seqs" | xargs | wc -w)

+            highest_seq=$(echo "${seqs}" | jq -r .seq | sort -n | uniq | tail -n 1)

+            if [[ ${seq_count} -eq 1 ]]
+            then # same seq numbers all round
+                if [[ ${highest_seq} -eq -1 ]]
+                then # if all seq numbers are -1
+                    echo "The sequence number is -1 ... exiting"
+                    exit 1
+                fi
+                bootstrap_unit=${mysql_lead}
+            else # we have different seq numbers
+                unit_high_seq=$(echo "$seqs" | jq -rc . | grep "${highest_seq}" | jq -r .sub | tail -n 1)
+                bootstrap_unit=${unit_high_seq}
+            fi
+        fi
+    fi

+    if [[ -n ${bootstrap_unit} ]]
+    then

-        seqs=$(echo $seq_number | jq -rc ". | {sub:.sub,seq:(.status|split(\".\")[1]|split(\": \")[1])}")

-        uniq_seqs=$(echo $seqs| jq .seq | sed s/\"//g | sort -n | uniq)
-        seq_count=$(echo $uniq_seqs | xargs | wc -w)

-        highest_seq=$(echo "${seqs}"| jq .seq | sed s/\"//g | sort -n | uniq | tail -n 1)
-        lowest_seq=$(echo "${seqs}"| jq .seq | sed s/\"//g | sort -n | uniq | head -n 1)

-        if [[ ${seq_count} -eq 1 ]]
-        then # same seq numbers all round
-            if [[ ${highest_seq} -eq -1 ]]
-            then # if all seq numbers are -1
-                echo "The sequence number is -1 ... exiting"
-                exit 1
-            fi
-            bootstrap_unit=${mysql_lead}
-        else # we have different seq numbers

-            unit_high_seq=$(echo $seqs | jq -rc . | grep ${highest_seq} | jq .sub | sed s/\"//g | tail -n 1)

-            bootstrap_unit=${unit_high_seq}
-        fi
-    fi
-fi

-if [[ -n ${bootstrap_unit} ]]
-then
-    juju run-action --wait ${bootstrap_unit} bootstrap-pxc
+        juju run-action --wait "${bootstrap_unit}" bootstrap-pxc
         juju run --application mysql "hooks/update-status"
         until [[ $(check_unit_status mysql "Unit waiting for cluster bootstrap") -eq 1 ]]
         do
@@ -125,14 +130,12 @@ then
     if [[ "${bootstrap_unit}" == "${mysql_lead}" ]] ; then
         for unit in ${mysql_units}; do
             if [[ "${unit}" != "${mysql_lead}" ]] ; then
-                juju run-action --wait ${unit} notify-bootstrapped
-                ran_bootstrap="true"
+                juju run-action --wait "${unit}" notify-bootstrapped
                 break
             fi
         done
     else
-        juju run-action --wait ${mysql_lead} notify-bootstrapped
-        ran_bootstrap="true"
+        juju run-action --wait "${mysql_lead}" notify-bootstrapped
     fi
     juju run -a mysql "hooks/update-status"
     until [[ $(check_unit_status mysql "Unit is ready") -eq 1 ]]
@@ -142,51 +145,71 @@ then
     # This is so that nagios doesn't report that the mysql daemon is down
     # although the process is running. juju will then automatically start
     # the mysqld process
-    juju ssh ${bootstrap_unit} -- sudo reboot
+    juju ssh "${bootstrap_unit}" -- sudo reboot
     fi

+    juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
+fi

+if [[ -n "${mysql_innodb_cluster}" ]] ; then

+    mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")

+    is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)

+    if [[ -z "${is_ready}" ]] ; then
+        reboot_status=$(juju run-action mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)

+        outcome=$(echo "$reboot_status" | jq .[].results.outcome)

+        if [[ ${outcome} == null ]] ; then

+            output=$(echo "$reboot_status" | jq .[].results.output)

+            mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")

+            bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")

+            juju run-action "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait

+        fi

+        juju run --application mysql-innodb-cluster "hooks/update-status"

+        # Run update-status on all mysql-router units, to ensure that they have connected to the mysql-innodb-cluster
+        jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
+            | xargs -I{} juju run -a "{}" -- 'hooks/update-status'

+    fi
 fi

-juju run -a nova-cloud-controller -- sudo systemctl restart nova-api-os-compute nova-conductor nova-consoleauth &
 juju run -a heat -- sudo systemctl restart heat-engine &
 juju run -a vault -- sudo systemctl restart vault &

 wait

 for app in nova-cloud-controller heat vault ; do
     juju run -a $app "hooks/update-status" &
 done

 wait

 # cleanup all crm resources
-cat ${juju_status} | jq ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" | sed s/\"//g | xargs -i juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
+jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
+    | xargs -I{} juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'

 do_vault

 # Wait 10 seconds, and ensure that vault is unsealed
 echo "Sleeping 10 seconds to wait for vault to finalise unseal"
 sleep 10
+juju run --all -- sudo systemctl restart systemd-resolved

-ceph_osd_apps=$(cat ${juju_status} | jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key")
+ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")

 ceph_osds=""
 for apps in ${ceph_osd_apps}
 do
     ceph_osds="${ceph_osds} $(cat ${juju_status} | jq -rc ". | .applications[\"${apps}\"].units | to_entries[] | .key")"
 done

 for ceph_osd in ${ceph_osds}
 do
-    juju ssh ${ceph_osd} -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@* ;
-    sudo systemctl start --all --type=service vaultlocker-decrypt@* ;
-    sudo systemctl start --all --type=service ceph-volume@*' &
+    juju run -a "${apps}" --timeout 30s -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@*'
+    juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service vaultlocker-decrypt@*'
+    juju run -a "${apps}" --timeout 30s -- 'sudo systemctl start --all --type=service ceph-volume@*' &
 done

 wait

 juju run -a ceph-osd "hooks/update-status"

-lds_servers=$(cat ${juju_status} | jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key")
+lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status}")

 cat > /tmp/restart-landscape.sh << EOF
 #!/bin/bash
@@ -196,11 +219,9 @@ EOF

 for lds_server in ${lds_servers}
 do
-    juju scp /tmp/restart-landscape.sh ${lds_server}:.
-    juju ssh ${lds_server} chmod +x restart-landscape.sh
-    juju ssh ${lds_server} sudo ./restart-landscape.sh &
+    juju scp /tmp/restart-landscape.sh "${lds_server}":.
+    juju ssh "${lds_server}" chmod +x restart-landscape.sh
+    juju ssh "${lds_server}" sudo ./restart-landscape.sh &
 done

 wait

-juju run --all -- sudo systemctl restart systemd-resolved
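For context on the percona-cluster branch above: the script prefers a unit whose workload status reports "Safe To Bootstrap: 1", and otherwise falls back to the unit with the highest sequence number. A minimal sketch of that fallback (invented sample data, not part of the commit):

# Minimal sketch of the fallback path: no unit reports
# "Safe To Bootstrap: 1", so pick the unit with the highest
# sequence number. The status lines below are invented.
status='{"sub":"mysql/0","status":"MySQL is down. Sequence Number: 102921. Safe To Bootstrap: 0"}
{"sub":"mysql/1","status":"MySQL is down. Sequence Number: 102925. Safe To Bootstrap: 0"}
{"sub":"mysql/2","status":"MySQL is down. Sequence Number: 102923. Safe To Bootstrap: 0"}'

# Same jq split the script uses: take the "Sequence Number: N" clause
# and keep N alongside the unit name.
seqs=$(echo "$status" | jq -rc '{sub:.sub,seq:(.status|split(".")[1]|split(": ")[1])}')
highest_seq=$(echo "$seqs" | jq -r .seq | sort -n | tail -n 1)
echo "$seqs" | grep "${highest_seq}" | jq -r .sub | tail -n 1   # prints: mysql/1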
132 fix_cloud_focal.sh (old script removed; replaced by the symbolic link below)
@@ -1,132 +0,0 @@
-#!/bin/bash
-
-# Used for debugging
-# set -ax
-
-# This script is required after a reboot of the cloud after the cloud has been
-# shut down
-
-get_lead()
-{
-    app_name=$1
-
-    jq -rc ".applications.\"${app_name}\".units | to_entries[] | select(.value.leader == "true") | .key" "${juju_status}"
-}
-
-do_vault()
-{
-    vault_vip=$(juju config vault vip)
-    echo export VAULT_ADDR="http://${vault_vip}:8200"
-    export VAULT_ADDR="http://${vault_vip}:8200"
-
-    echo " "
-
-    IPS=$(jq -r '.applications.vault.units | to_entries[].value."public-address"' "${juju_status}")
-
-    for ip in $IPS;do
-        echo export VAULT_ADDR=http://"${ip}":8200;
-        export VAULT_ADDR=http://"${ip}":8200;
-        for vault_key in $(head -n3 vault-secrets.txt | awk '{print $4}');do
-            echo vault operator unseal -tls-skip-verify "$vault_key"
-            vault operator unseal -tls-skip-verify "$vault_key"
-        done;
-    done;
-
-    juju run -a vault "hooks/update-status"
-}
-
-juju-wait -v
-
-juju_status=$(mktemp)
-
-juju status --format json > "${juju_status}"
-
-bootstrap_unit=""
-
-mysql_status=$(jq -rc ".applications.\"mysql-innodb-cluster\".units | to_entries[] | {sub:.key,status:.value[\"workload-status\"].message}" "${juju_status}")
-
-is_ready=$(echo "$mysql_status" | jq -rc . | grep "Mode: R/W, Cluster is ONLINE" | jq -r .sub)
-
-if [[ -z "${is_ready}" ]] ; then
-    reboot_status=$(juju run-action mysql-innodb-cluster/leader reboot-cluster-from-complete-outage --wait --format json)
-
-    outcome=$(echo "$reboot_status" | jq .[].results.outcome)
-
-    if [[ ${outcome} == null ]] ; then
-
-        output=$(echo "$reboot_status" | jq .[].results.output)
-
-        mysql_ip=$(echo "$output" | sed -e 's/\\n/\n/g' 2>&1| grep Please | sed -e "s|.*Please use the most up to date instance: '\(.*\):.*|\1|")
-
-        bootstrap_unit=$(jq -r ".applications.\"mysql-innodb-cluster\".units | to_entries[] | select(.value.\"public-address\" == \"${mysql_ip}\") | .key" "${juju_status}")
-
-        juju run-action "${bootstrap_unit}" reboot-cluster-from-complete-outage --wait
-
-    fi
-
-    juju run --application mysql-innodb-cluster "hooks/update-status"
-
-    # Run update-status on all mysql-router units, to ensure that they have connected to the mysql-innodb-cluster
-    jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"mysql-router\") | .key" "${juju_status}" \
-        | xargs -I{} juju run -a "{}" -- 'hooks/update-status'
-
-fi
-
-juju run -a heat -- sudo systemctl restart heat-engine &
-juju run -a vault -- sudo systemctl restart vault &
-
-wait
-
-for app in heat vault ; do
-    juju run -a $app "hooks/update-status" &
-done
-
-wait
-
-# cleanup all crm resources
-jq -r ".applications | to_entries[] | select(.value[\"charm-name\"] == \"hacluster\") | .key" "${juju_status}" \
-    | xargs -I{} juju run --unit "{}"/leader -- 'sudo crm_resource -l | sed s/:.*//g | uniq | xargs -i sudo crm resource cleanup \"\{\}\"'
-
-do_vault
-
-# Wait 10 seconds, and ensure that vault is unsealed
-echo "Sleeping 10 seconds to wait for vault to finalise unseal"
-sleep 10
-
-ceph_osd_apps=$(jq -rc ".applications | to_entries[] | select(.value[\"charm-name\"] == \"ceph-osd\") | .key" "${juju_status}")
-
-ceph_osds=""
-for apps in ${ceph_osd_apps}
-do
-    ceph_osds="${ceph_osds} $(jq -rc ". | .applications[\"${apps}\"].units | to_entries[] | .key" "${juju_status}")"
-done
-
-juju run --all -- sudo systemctl restart systemd-resolved
-
-for ceph_osd in ${ceph_osds}
-do
-    juju ssh "${ceph_osd}" -- 'sudo systemctl kill --all --type=service vaultlocker-decrypt@* ;
-    sudo systemctl start --all --type=service vaultlocker-decrypt@* ;
-    sudo systemctl start --all --type=service ceph-volume@*' &
-done
-
-wait
-
-juju run -a ceph-osd "hooks/update-status"
-
-lds_servers=$(jq -rc ". | .applications[\"landscape-server\"].units | to_entries[] | .key" "${juju_status}")
-
-cat > /tmp/restart-landscape.sh << EOF
-#!/bin/bash
-
-sudo systemctl restart landscape-*
-EOF
-
-for lds_server in ${lds_servers}
-do
-    juju scp /tmp/restart-landscape.sh "${lds_server}":.
-    juju ssh "${lds_server}" chmod +x restart-landscape.sh
-    juju ssh "${lds_server}" sudo ./restart-landscape.sh &
-done
-
-wait
1 fix_cloud_focal.sh (Symbolic link)
@@ -0,0 +1 @@
+fix_cloud.sh
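With bionic and focal handling merged into fix_cloud.sh, the focal entry point survives only as a symlink. A hypothetical reproduction of this change (not the commit's actual commands):

# Hypothetical reproduction: drop the separate focal script and keep
# the old name as a symlink to the combined one.
git rm fix_cloud_focal.sh
ln -s fix_cloud.sh fix_cloud_focal.sh
git add fix_cloud_focal.sh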
12 shutdown_nodes.sh (Executable file)
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+rcmd="ssh"
+cmd="sudo halt -p"
+
+if [[ "$1" == "asrock" ]] ; then
+    hosts="192.168.1.21[1-3]"
+elif [[ "$1" == "pi-k8s" ]] ; then
+    hosts="192.168.1.8[1-6]"
+fi
+
+pdsh -R $rcmd -l ubuntu -w $hosts -- $cmd
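shutdown_nodes.sh fans a single `sudo halt -p` out over a pdsh host range; note that any other argument leaves ${hosts} empty. Usage against the two ranges hard-coded above:

# Power off the asrock hosts (192.168.1.211-213):
./shutdown_nodes.sh asrock

# Power off the pi-k8s nodes (192.168.1.81-86):
./shutdown_nodes.sh pi-k8s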
vault-secrets.txt
@@ -1,10 +1,10 @@
-Unseal Key 1: Tbel8zdD7vS8g1bSUaW5Q+8KS3No+sNVrqGKodiPGJ57
-Unseal Key 2: vioPM3fJJ8hitlBq7NuzsrlKCiHRo7dKkJXuskNJlERP
-Unseal Key 3: T0JJT7ATamUa+Quk9VDWb2WvstQ3W7rGUC+CgJbZHo2v
-Unseal Key 4: fC8m3accpSLsVhK3u3OA1kpsoUuXTp11xZcJolvu8uvo
-Unseal Key 5: YfFun6w9cihqYDGak7c5Q1ZQxafI1vJ0B/d/XJqaUygC
+Unseal Key 1: xb11HFT+6KBH0y8Jglvmr2ljnVIWZk9YruHLJpR4oVot
+Unseal Key 2: aG8MUmlY0NbzHyIp5MSj98q7eCalUhrYmYXXMGozocSn
+Unseal Key 3: 1BMKM86m6JMZL9DCypY1rbsfvuiO22Tm0i9E2T6a/UUU
+Unseal Key 4: VZ36NLxKn/xC32TGDhThIPlndqSaXhmm3FFEewzoeAXk
+Unseal Key 5: bAZ77ThRY6RfHM00q7kkSmknY0tqXla092PDPkuP5NfW

-Initial Root Token: s.90scV8c059y8IelnA33YAs6C
+Initial Root Token: s.EQOap6R44K8Hpx3o7lB5ORqs

 Vault initialized with 5 key shares and a key threshold of 3. Please securely
 distribute the key shares printed above. When the Vault is re-sealed,
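Since the threshold is 3 of 5 shares, any three of the new keys unseal a unit; a hand-run equivalent of what do_vault automates, with an illustrative unit address:

# Manual unseal of one vault unit (the VAULT_ADDR value is
# illustrative); three of the five shares above meet the threshold.
export VAULT_ADDR=http://192.168.1.100:8200
vault operator unseal -tls-skip-verify xb11HFT+6KBH0y8Jglvmr2ljnVIWZk9YruHLJpR4oVot
vault operator unseal -tls-skip-verify aG8MUmlY0NbzHyIp5MSj98q7eCalUhrYmYXXMGozocSn
vault operator unseal -tls-skip-verify 1BMKM86m6JMZL9DCypY1rbsfvuiO22Tm0i9E2T6a/UUU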