Draft EL7 diskless support
parent 554e25d6cb
commit eaa0921420
@ -0,0 +1,245 @@
|
||||
get_remote_apikey() {
|
||||
while [ -z "$confluent_apikey" ]; do
|
||||
/opt/confluent/bin/clortho $nodename $confluent_mgr > /etc/confluent/confluent.apikey
|
||||
if grep ^SEALED: /etc/confluent/confluent.apikey > /dev/null; then
|
||||
# we don't support remote sealed api keys anymore
|
||||
echo > /etc/confluent/confluent.apikey
|
||||
fi
|
||||
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
|
||||
if [ -z "$confluent_apikey" ]; then
|
||||
echo "Unable to acquire node api key, set deployment.apiarmed=once on node '$nodename', retrying..."
|
||||
sleep 10
|
||||
elif [ -c /dev/tpmrm0 -a -x /usr/bin/tpm2_startauthsession ]; then
|
||||
tmpdir=$(mktemp -d)
|
||||
cd $tmpdir
|
||||
tpm2_startauthsession --session=session.ctx
|
||||
tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
|
||||
tpm2_createprimary -G ecc -Q --key-context=prim.ctx
|
||||
(echo -n "CONFLUENT_APIKEY:";cat /etc/confluent/confluent.apikey) | tpm2_create -Q --policy=pcr15.sha256.policy --public=data.pub --private=data.priv -i - -C prim.ctx
|
||||
tpm2_load -Q --parent-context=prim.ctx --public=data.pub --private=data.priv --name=confluent.apikey --key-context=data.ctx
|
||||
tpm2_evictcontrol -Q -c data.ctx
|
||||
tpm2_flushcontext session.ctx
|
||||
cd - > /dev/null
|
||||
rm -rf $tmpdir
|
||||
fi
|
||||
done
|
||||
}
|
||||
root=1
|
||||
rootok=1
|
||||
netroot=confluent
|
||||
clear
|
||||
mkdir -p /etc/ssh
|
||||
mkdir -p /var/tmp/
|
||||
mkdir -p /var/empty/sshd
|
||||
mkdir -p /etc/confluent
|
||||
sed -i '/^root:x/d' /etc/passwd
|
||||
echo root:x:0:0::/:/bin/bash >> /etc/passwd
|
||||
echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd
|
||||
|
||||
if ! grep console= /proc/cmdline >& /dev/null; then
|
||||
autocons=$(/opt/confluent/bin/autocons)
|
||||
autocons=${autocons##*/}
|
||||
echo "Automatic console configured for $autocons"
|
||||
fi
|
||||
echo "Initializing confluent diskless environment"
|
||||
echo -n "udevd: "
|
||||
/usr/lib/systemd/systemd-udevd --daemon
|
||||
echo -n "Loading drivers..."
|
||||
udevadm trigger
|
||||
udevadm trigger --type=devices --action=add
|
||||
udevadm settle
|
||||
modprobe ib_ipoib
|
||||
modprobe ib_umad
|
||||
modprobe hfi1
|
||||
modprobe mlx5_ib
|
||||
echo "done"
|
||||
cat > /etc/ssh/sshd_config << EOF
|
||||
Port 2222
|
||||
Subsystem sftp /usr/libexec/openssh/sftp-server
|
||||
PermitRootLogin yes
|
||||
AuthorizedKeysFile .ssh/authorized_keys
|
||||
EOF
|
||||
mkdir /root/.ssh
|
||||
mkdir /.ssh
|
||||
cat /ssh/*pubkey > /root/.ssh/authorized_keys 2>/dev/null
|
||||
cp /root/.ssh/authorized_keys /.ssh/
|
||||
cat /tls/*.pem > /etc/confluent/ca.pem
|
||||
mkdir -p /etc/pki/tls/certs
|
||||
cat /tls/*.pem > /etc/pki/tls/certs/ca-bundle.crt
|
||||
TRIES=0
|
||||
oldumask=$(umask)
|
||||
umask 0077
|
||||
tpmdir=$(mktemp -d)
|
||||
cd $tpmdir
|
||||
lasthdl=""
|
||||
if [ -c /dev/tpmrm0 -a -x /usr/bin/tpm2_getcap ]; then
|
||||
for hdl in $(tpm2_getcap handles-persistent|awk '{print $2}'); do
|
||||
tpm2_startauthsession --policy-session --session=session.ctx
|
||||
tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
|
||||
unsealeddata=$(tpm2_unseal --auth=session:session.ctx -Q -c $hdl 2>/dev/null)
|
||||
tpm2_flushcontext session.ctx
|
||||
if [[ $unsealeddata == "CONFLUENT_APIKEY:"* ]]; then
|
||||
confluent_apikey=${unsealeddata#CONFLUENT_APIKEY:}
|
||||
echo $confluent_apikey > /etc/confluent/confluent.apikey
|
||||
if [ -n "$lasthdl" ]; then
|
||||
tpm2_evictcontrol -c $lasthdl
|
||||
fi
|
||||
lasthdl=$hdl
|
||||
fi
|
||||
done
|
||||
fi
|
||||
cd - > /dev/null
|
||||
rm -rf $tpmdir
|
||||
touch /etc/confluent/confluent.info
|
||||
cd /sys/class/net
|
||||
echo -n "Scanning for network configuration..."
|
||||
while ! grep ^EXTMGRINFO: /etc/confluent/confluent.info | awk -F'|' '{print $3}' | grep 1 >& /dev/null && [ "$TRIES" -lt 30 ]; do
|
||||
TRIES=$((TRIES + 1))
|
||||
for i in *; do
|
||||
ip link set $i up
|
||||
done
|
||||
/opt/confluent/bin/copernicus -t > /etc/confluent/confluent.info
|
||||
done
|
||||
cd /
|
||||
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
|
||||
hostname $nodename
|
||||
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | head -n 1 | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
|
||||
if [ -z "$confluent_mgr" ]; then
|
||||
confluent_mgr=$(grep ^MANAGER: /etc/confluent/confluent.info|head -n 1 | awk '{print $2}')
|
||||
fi
|
||||
if [[ $confluent_mgr == *%* ]]; then
|
||||
echo $confluent_mgr | awk -F% '{print $2}' > /tmp/confluent.ifidx
|
||||
ifidx=$(cat /tmp/confluent.ifidx)
|
||||
ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
|
||||
ifname=${ifname%:}
|
||||
fi
|
||||
|
||||
ready=0
|
||||
while [ $ready = "0" ]; do
|
||||
get_remote_apikey
|
||||
if [[ $confluent_mgr == *:* ]]; then
|
||||
confluent_mgr="[${confluent_mgr/\%/%25}]"
|
||||
fi
|
||||
tmperr=$(mktemp)
|
||||
curl -gsSf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/deploycfg > /etc/confluent/confluent.deploycfg 2> $tmperr
|
||||
if grep 401 $tmperr > /dev/null; then
|
||||
confluent_apikey=""
|
||||
if [ -n "$lasthdl" ]; then
|
||||
tpm2_evictcontrol -c $lasthdl
|
||||
fi
|
||||
confluent_mgr=${confluent_mgr#[}
|
||||
confluent_mgr=${confluent_mgr%]}
|
||||
else
|
||||
ready=1
|
||||
fi
|
||||
rm $tmperr
|
||||
done
|
||||
if [ ! -z "$autocons" ] && grep "textconsole: true" /etc/confluent/confluent.deploycfg > /dev/null; then /opt/confluent/bin/autocons -c > /dev/null; fi
|
||||
if [ -c /dev/tpmrm0 -a -x /usr/bin/tpm2_pcrextend ]; then
|
||||
tpm2_pcrextend 15:sha256=2fbe96c50dde38ce9cd2764ddb79c216cfbcd3499568b1125450e60c45dd19f2
|
||||
fi
|
||||
umask $oldumask
|
||||
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
|
||||
if [ "$autoconfigmethod" = "dhcp" ]; then
|
||||
echo -n "Attempting to use dhcp to bring up $ifname..."
|
||||
dhclient $ifname
|
||||
echo "Complete:"
|
||||
ip addr show dev $ifname
|
||||
else
|
||||
v4addr=$(grep ^ipv4_address: /etc/confluent/confluent.deploycfg)
|
||||
v4addr=${v4addr#ipv4_address: }
|
||||
v4gw=$(grep ^ipv4_gateway: /etc/confluent/confluent.deploycfg)
|
||||
v4gw=${v4gw#ipv4_gateway: }
|
||||
if [ "$v4gw" = "null" ]; then
|
||||
v4gw=""
|
||||
fi
|
||||
v4nm=$(grep ^prefix: /etc/confluent/confluent.deploycfg)
|
||||
v4nm=${v4nm#prefix: }
|
||||
echo "Setting up $ifname as static at $v4addr/$v4nm"
|
||||
ip addr add dev $ifname $v4addr/$v4nm
|
||||
if [ ! -z "$v4gw" ]; then
|
||||
ip route add default via $v4gw
|
||||
fi
|
||||
mkdir -p /run/NetworkManager/system-connections
|
||||
cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
|
||||
[connection]
|
||||
EOC
|
||||
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
|
||||
type=ethernet
|
||||
autoconnect-retries=1
|
||||
EOC
|
||||
echo interface-name=$ifname >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
|
||||
multi-connect=1
|
||||
permissions=
|
||||
wait-device-timeout=60000
|
||||
|
||||
[ethernet]
|
||||
mac-address-blacklist=
|
||||
|
||||
[ipv4]
|
||||
EOC
|
||||
echo address1=$v4addr/$v4nm >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
if [ ! -z "$v4gw" ]; then
|
||||
echo gateway=$v4gw >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
fi
|
||||
nameserversec=0
|
||||
nameservers=""
|
||||
while read -r entry; do
|
||||
if [ $nameserversec = 1 ]; then
|
||||
if [[ $entry == "-"* ]]; then
|
||||
nameservers="$nameservers"${entry#- }";"
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
nameserversec=0
|
||||
if [ "${entry%:*}" = "nameservers" ]; then
|
||||
nameserversec=1
|
||||
continue
|
||||
fi
|
||||
done < /etc/confluent/confluent.deploycfg
|
||||
echo dns=$nameservers >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
|
||||
dnsdomain=${dnsdomain#dnsdomain: }
|
||||
echo dns-search=$dnsdomain >> /run/NetworkManager/system-connections/$ifname.nmconnection
|
||||
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
|
||||
may-fail=false
|
||||
method=manual
|
||||
|
||||
[ipv6]
|
||||
addr-gen-mode=eui64
|
||||
method=auto
|
||||
|
||||
[proxy]
|
||||
EOC
|
||||
fi
|
||||
chmod 600 /run/NetworkManager/system-connections/*.nmconnection
|
||||
echo -n "Initializing ssh..."
|
||||
ssh-keygen -A
|
||||
for pubkey in /etc/ssh/ssh_host*key.pub; do
|
||||
certfile=${pubkey/.pub/-cert.pub}
|
||||
privfile=${pubkey%.pub}
|
||||
curl -sf -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -d @$pubkey https://$confluent_mgr/confluent-api/self/sshcert > $certfile
|
||||
if [ -s $certfile ]; then
|
||||
echo HostCertificate $certfile >> /etc/ssh/sshd_config
|
||||
fi
|
||||
echo HostKey $privfile >> /etc/ssh/sshd_config
|
||||
done
|
||||
/usr/sbin/sshd
|
||||
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg| awk '{print $2}')
|
||||
confluent_proto=$(grep ^protocol: /etc/confluent/confluent.deploycfg| awk '{print $2}')
|
||||
confluent_urls=""
|
||||
for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed -e s/%/%25/); do
|
||||
if [[ $addr == *:* ]]; then
|
||||
confluent_urls="$confluent_urls $confluent_proto://[$addr]/confluent-public/os/$confluent_profile/rootimg.sfs"
|
||||
else
|
||||
confluent_urls="$confluent_urls $confluent_proto://$addr/confluent-public/os/$confluent_profile/rootimg.sfs"
|
||||
fi
|
||||
done
|
||||
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg| awk '{print $2}')
|
||||
mkdir -p /etc/confluent
|
||||
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
|
||||
. /etc/confluent/functions
|
||||
source_remote imageboot.sh
|
@ -0,0 +1,4 @@
. /etc/confluent/functions
# This is a convenient place to keep customizations separate from modifying the stock scripts
# While modification of the stock scripts is fine, it may be easier to rebase to a newer
# stock profile if the '.custom' files are used.
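A hedged illustration of the kind of site-specific addition that could live in such a '.custom' hook, assuming the calling script has already exported $nodename as firstboot.sh and onboot.sh do; the inventory URL is a placeholder, not a confluent endpoint:

# hypothetical '.custom' addition: report the node to a site inventory service
curl -sf -X POST -d "node=$nodename" https://inventory.example.com/register || true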
confluent_osdeploy/el7-diskless/profiles/default/scripts/firstboot.d/.gitignore (new empty file, vendored)
@ -0,0 +1,11 @@
[Unit]
Description=First Boot Process
Requires=network-online.target
After=network-online.target

[Service]
ExecStart=/opt/confluent/bin/firstboot.sh

[Install]
WantedBy=multi-user.target
@ -0,0 +1,40 @@
#!/bin/sh

# This script is executed on the first boot after install has
# completed. It is best to edit the middle of the file as
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.

nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done

if [ ! -f /etc/confluent/firstboot.ran ]; then
    touch /etc/confluent/firstboot.ran

    cat /etc/confluent/tls/*.pem >> /etc/pki/tls/certs/ca-bundle.crt

    run_remote firstboot.custom
    # Firstboot scripts may be placed into firstboot.d, e.g. firstboot.d/01-firstaction.sh, firstboot.d/02-secondaction.sh
    run_remote_parts firstboot.d

    # Induce execution of remote configuration, e.g. ansible plays in ansible/firstboot.d/
    run_remote_config firstboot.d
fi

curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid
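As a hedged example (not part of this commit), a firstboot.d drop-in is simply a script placed under the profile's scripts/firstboot.d/ directory that run_remote_parts fetches and executes; the name 01-example.sh and its body are hypothetical:

#!/bin/sh
# hypothetical scripts/firstboot.d/01-example.sh
# nodename, confluent_mgr and confluent_profile are exported by firstboot.sh
echo "first boot of $nodename deployed from $confluent_mgr ($confluent_profile)" >> /var/log/confluent/site-firstboot.log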
@ -0,0 +1,196 @@
|
||||
#!/bin/bash
|
||||
function test_mgr() {
|
||||
if curl -s https://${1}/confluent-api/ > /dev/null; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function confluentpython() {
|
||||
if [ -x /usr/libexec/platform-python ]; then
|
||||
/usr/libexec/platform-python $*
|
||||
elif [ -x /usr/bin/python3 ]; then
|
||||
/usr/bin/python3 $*
|
||||
elif [ -x /usr/bin/python ]; then
|
||||
/usr/bin/python $*
|
||||
elif [ -x /usr/bin/python2 ]; then
|
||||
/usr/bin/python2 $*
|
||||
fi
|
||||
}
|
||||
|
||||
function set_confluent_vars() {
|
||||
if [ -z "$nodename" ]; then
|
||||
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
|
||||
fi
|
||||
if [[ "$confluent_mgr" == *"%"* ]]; then
|
||||
confluent_mgr=""
|
||||
fi
|
||||
if [ -z "$confluent_mgr" ]; then
|
||||
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
|
||||
if ! test_mgr $confluent_mgr; then
|
||||
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
|
||||
if [[ "$confluent_mgr" = *":"* ]]; then
|
||||
confluent_mgr="[$confluent_mgr]"
|
||||
fi
|
||||
fi
|
||||
if ! test_mgr $confluent_mgr; then
|
||||
BESTMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|1$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
|
||||
OKMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|0$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
|
||||
for confluent_mgr in $BESTMGRS $OKMGRS; do
|
||||
if [[ $confluent_mgr == *":"* ]]; then
|
||||
confluent_mgr="[$confluent_mgr]"
|
||||
fi
|
||||
if test_mgr $confluent_mgr; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
if [ -z "$confluent_profile" ]; then
|
||||
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
|
||||
fi
|
||||
}
|
||||
|
||||
fetch_remote() {
|
||||
curlargs=""
|
||||
if [ -f /etc/confluent/ca.pem ]; then
|
||||
curlargs=" --cacert /etc/confluent/ca.pem"
|
||||
fi
|
||||
set_confluent_vars
|
||||
mkdir -p $(dirname $1)
|
||||
curl -f -sS $curlargs https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/$1 > $1
|
||||
if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
|
||||
}
|
||||
|
||||
source_remote_parts() {
|
||||
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
|
||||
apiclient=/opt/confluent/bin/apiclient
|
||||
if [ -f /etc/confluent/apiclient ]; then
|
||||
apiclient=/etc/confluent/apiclient
|
||||
fi
|
||||
scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
|
||||
for script in $scriptlist; do
|
||||
source_remote $1/$script
|
||||
done
|
||||
rm -rf $confluentscripttmpdir
|
||||
unset confluentscripttmpdir
|
||||
}
|
||||
|
||||
run_remote_parts() {
|
||||
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
|
||||
apiclient=/opt/confluent/bin/apiclient
|
||||
if [ -f /etc/confluent/apiclient ]; then
|
||||
apiclient=/etc/confluent/apiclient
|
||||
fi
|
||||
scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
|
||||
for script in $scriptlist; do
|
||||
run_remote $1/$script
|
||||
done
|
||||
rm -rf $confluentscripttmpdir
|
||||
unset confluentscripttmpdir
|
||||
}
|
||||
|
||||
source_remote() {
|
||||
set_confluent_vars
|
||||
unsettmpdir=0
|
||||
echo
|
||||
echo '---------------------------------------------------------------------------'
|
||||
echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
|
||||
if [ -z "$confluentscripttmpdir" ]; then
|
||||
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
|
||||
unsettmpdir=1
|
||||
fi
|
||||
echo Sourcing from $confluentscripttmpdir
|
||||
cd $confluentscripttmpdir
|
||||
fetch_remote $1
|
||||
if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
|
||||
chmod +x $1
|
||||
cmd=$1
|
||||
shift
|
||||
source ./$cmd
|
||||
cd - > /dev/null
|
||||
if [ "$unsettmpdir" = 1 ]; then
|
||||
rm -rf $confluentscripttmpdir
|
||||
unset confluentscripttmpdir
|
||||
unsettmpdir=0
|
||||
fi
|
||||
rm -rf $confluentscripttmpdir
|
||||
return $retcode
|
||||
}
|
||||
|
||||
run_remote() {
|
||||
requestedcmd="'$*'"
|
||||
unsettmpdir=0
|
||||
set_confluent_vars
|
||||
echo
|
||||
echo '---------------------------------------------------------------------------'
|
||||
echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
|
||||
if [ -z "$confluentscripttmpdir" ]; then
|
||||
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
|
||||
unsettmpdir=1
|
||||
fi
|
||||
echo Executing in $confluentscripttmpdir
|
||||
cd $confluentscripttmpdir
|
||||
fetch_remote $1
|
||||
if [ $? != 0 ]; then echo $requestedcmd failed to download; return 1; fi
|
||||
chmod +x $1
|
||||
cmd=$1
|
||||
if [ -x /usr/bin/chcon ]; then
|
||||
chcon system_u:object_r:bin_t:s0 $cmd
|
||||
fi
|
||||
shift
|
||||
./$cmd $*
|
||||
retcode=$?
|
||||
if [ $retcode -ne 0 ]; then
|
||||
echo "$requestedcmd exited with code $retcode"
|
||||
fi
|
||||
cd - > /dev/null
|
||||
if [ "$unsettmpdir" = 1 ]; then
|
||||
rm -rf $confluentscripttmpdir
|
||||
unset confluentscripttmpdir
|
||||
unsettmpdir=0
|
||||
fi
|
||||
return $retcode
|
||||
}
|
||||
|
||||
run_remote_python() {
|
||||
echo
|
||||
set_confluent_vars
|
||||
if [ -f /etc/confluent/ca.pem ]; then
|
||||
curlargs=" --cacert /etc/confluent/ca.pem"
|
||||
fi
|
||||
echo '---------------------------------------------------------------------------'
|
||||
echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
|
||||
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
|
||||
echo Executing in $confluentscripttmpdir
|
||||
cd $confluentscripttmpdir
|
||||
mkdir -p $(dirname $1)
|
||||
curl -f -sS $curlargs https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/$1 > $1
|
||||
if [ $? != 0 ]; then echo "'$*'" failed to download; return 1; fi
|
||||
confluentpython $*
|
||||
retcode=$?
|
||||
echo "'$*' exited with code $retcode"
|
||||
cd - > /dev/null
|
||||
rm -rf $confluentscripttmpdir
|
||||
unset confluentscripttmpdir
|
||||
return $retcode
|
||||
}
|
||||
|
||||
run_remote_config() {
|
||||
echo
|
||||
set_confluent_vars
|
||||
apiclient=/opt/confluent/bin/apiclient
|
||||
if [ -f /etc/confluent/apiclient ]; then
|
||||
apiclient=/etc/confluent/apiclient
|
||||
fi
|
||||
echo '---------------------------------------------------------------------------'
|
||||
echo Requesting to run remote configuration for "'$*'" from $confluent_mgr under profile $confluent_profile
|
||||
confluentpython $apiclient /confluent-api/self/remoteconfig/"$*" -d {}
|
||||
confluentpython $apiclient /confluent-api/self/remoteconfig/status -w 204
|
||||
echo
|
||||
echo 'Completed remote configuration'
|
||||
echo '---------------------------------------------------------------------------'
|
||||
return
|
||||
}
|
||||
#If invoked as a command, use the arguments to actually run a function
|
||||
(return 0 2>/dev/null) || $1 "${@:2}"
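The line above lets this functions file double as a command when it is executed rather than sourced: the first argument names a function and the remaining arguments are passed through. A hedged usage sketch, assuming the file has been staged as /etc/confluent/functions:

# sourced: pull the helpers into the current shell
. /etc/confluent/functions
run_remote_parts onboot.d
# executed: run a single helper without sourcing
bash /etc/confluent/functions run_remote_python getinstalldisk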
|
@ -0,0 +1,93 @@
import subprocess
import os


class DiskInfo(object):
    def __init__(self, devname):
        self.name = devname
        self.wwn = None
        self.path = None
        self.model = ''
        self.size = 0
        self.driver = None
        self.mdcontainer = ''
        devnode = '/dev/{0}'.format(devname)
        qprop = subprocess.check_output(
            ['udevadm', 'info', '--query=property', devnode])
        if not isinstance(qprop, str):
            qprop = qprop.decode('utf8')
        for prop in qprop.split('\n'):
            if '=' not in prop:
                continue
            k, v = prop.split('=', 1)
            if k == 'DEVTYPE' and v != 'disk':
                raise Exception('Not a disk')
            elif k == 'DM_NAME':
                raise Exception('Device Mapper')
            elif k == 'ID_MODEL':
                self.model = v
            elif k == 'DEVPATH':
                self.path = v
            elif k == 'ID_WWN':
                self.wwn = v
            elif k == 'MD_CONTAINER':
                self.mdcontainer = v
        attrs = subprocess.check_output(['udevadm', 'info', '-a', devnode])
        if not isinstance(attrs, str):
            attrs = attrs.decode('utf8')
        for attr in attrs.split('\n'):
            if '==' not in attr:
                continue
            k, v = attr.split('==', 1)
            k = k.strip()
            if k == 'ATTRS{size}':
                self.size = v.replace('"', '')
            elif (k == 'DRIVERS' and not self.driver
                    and v not in ('"sd"', '""')):
                self.driver = v.replace('"', '')
        if not self.driver and 'imsm' not in self.mdcontainer:
            raise Exception("No driver detected")
        if os.path.exists('/sys/block/{0}/size'.format(self.name)):
            with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
                self.size = int(sizesrc.read()) * 512
        if int(self.size) < 536870912:
            raise Exception("Device too small for install")

    @property
    def priority(self):
        if self.model.lower() in ('thinksystem_m.2_vd', 'thinksystem m.2', 'thinksystem_m.2'):
            return 0
        if 'imsm' in self.mdcontainer:
            return 1
        if self.driver == 'ahci':
            return 2
        if self.driver.startswith('megaraid'):
            return 3
        if self.driver.startswith('mpt'):
            return 4
        return 99

    def __repr__(self):
        return repr({
            'name': self.name,
            'path': self.path,
            'wwn': self.wwn,
            'driver': self.driver,
            'size': self.size,
            'model': self.model,
        })


def main():
    disks = []
    for disk in sorted(os.listdir('/sys/class/block')):
        try:
            disk = DiskInfo(disk)
            disks.append(disk)
        except Exception as e:
            print("Skipping {0}: {1}".format(disk, str(e)))
    nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
    if nd:
        open('/tmp/installdisk', 'w').write(nd[0])


if __name__ == '__main__':
    main()
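A brief usage note (a sketch, not taken from the commit): the selector above writes only the single highest-priority device name to /tmp/installdisk, which image2disk.py later reads, so a manual check inside the chrooted image looks roughly like:

run_remote_python getinstalldisk
cat /tmp/installdisk    # prints whichever device name sorted first by priority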
@ -0,0 +1,376 @@
|
||||
#!/usr/bin/python3
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import shutil
|
||||
import socket
|
||||
import stat
|
||||
import struct
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
def get_next_part_meta(img, imgsize):
|
||||
if img.tell() == imgsize:
|
||||
return None
|
||||
pathlen = struct.unpack('!H', img.read(2))[0]
|
||||
mountpoint = img.read(pathlen).decode('utf8')
|
||||
jsonlen = struct.unpack('!I', img.read(4))[0]
|
||||
metadata = json.loads(img.read(jsonlen).decode('utf8'))
|
||||
img.seek(16, 1) # skip the two 64-bit values we don't use, they are in json
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip filesystem type
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip orig devname (redundant with json)
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip padding
|
||||
nextlen = struct.unpack('!Q', img.read(8))[0]
|
||||
img.seek(nextlen, 1) # go to next section
|
||||
return metadata
|
||||
|
||||
def get_multipart_image_meta(img):
|
||||
img.seek(0, 2)
|
||||
imgsize = img.tell()
|
||||
img.seek(16)
|
||||
seekamt = img.read(1)
|
||||
img.seek(struct.unpack('B', seekamt)[0], 1)
|
||||
partinfo = get_next_part_meta(img, imgsize)
|
||||
while partinfo:
|
||||
yield partinfo
|
||||
partinfo = get_next_part_meta(img, imgsize)
|
||||
|
||||
def get_image_metadata(imgpath):
|
||||
with open(imgpath, 'rb') as img:
|
||||
header = img.read(16)
|
||||
if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1':
|
||||
for md in get_multipart_image_meta(img):
|
||||
yield md
|
||||
else:
|
||||
raise Exception('Installation from single part image not supported')
|
||||
|
||||
class PartedRunner():
|
||||
def __init__(self, disk):
|
||||
self.disk = disk
|
||||
|
||||
def run(self, command):
|
||||
command = command.split()
|
||||
command = ['parted', '-a', 'optimal', '-s', self.disk] + command
|
||||
return subprocess.check_output(command).decode('utf8')
|
||||
|
||||
def fixup(rootdir, vols):
|
||||
devbymount = {}
|
||||
for vol in vols:
|
||||
devbymount[vol['mount']] = vol['targetdisk']
|
||||
fstabfile = os.path.join(rootdir, 'etc/fstab')
|
||||
with open(fstabfile) as tfile:
|
||||
fstab = tfile.read().split('\n')
|
||||
while not fstab[0]:
|
||||
fstab = fstab[1:]
|
||||
if os.path.exists(os.path.join(rootdir, '.autorelabel')):
|
||||
os.unlink(os.path.join(rootdir, '.autorelabel'))
|
||||
with open(fstabfile, 'w') as tfile:
|
||||
for tab in fstab:
|
||||
entry = tab.split()
|
||||
if tab.startswith('#ORIGFSTAB#'):
|
||||
if entry[1] in devbymount:
|
||||
targetdev = devbymount[entry[1]]
|
||||
if targetdev.startswith('/dev/localstorage/'):
|
||||
entry[0] = targetdev
|
||||
else:
|
||||
uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8')
|
||||
uuid = uuid.strip()
|
||||
entry[0] = 'UUID={}'.format(uuid)
|
||||
elif entry[2] == 'swap':
|
||||
entry[0] = '/dev/mapper/localstorage-swap'
|
||||
entry[0] = entry[0].ljust(42)
|
||||
entry[1] = entry[1].ljust(16)
|
||||
entry[3] = entry[3].ljust(28)
|
||||
tab = '\t'.join(entry)
|
||||
tfile.write(tab + '\n')
|
||||
with open(os.path.join(rootdir, 'etc/hostname'), 'w') as nameout:
|
||||
nameout.write(socket.gethostname() + '\n')
|
||||
selinuxconfig = os.path.join(rootdir, 'etc/selinux/config')
|
||||
policy = None
|
||||
if os.path.exists(selinuxconfig):
|
||||
with open(selinuxconfig) as cfgin:
|
||||
sec = cfgin.read().split('\n')
|
||||
for l in sec:
|
||||
l = l.split('#', 1)[0]
|
||||
if l.startswith('SELINUXTYPE='):
|
||||
_, policy = l.split('=')
|
||||
for sshkey in glob.glob(os.path.join(rootdir, 'etc/ssh/*_key*')):
|
||||
os.unlink(sshkey)
|
||||
for sshkey in glob.glob('/etc/ssh/*_key*'):
|
||||
newkey = os.path.join(rootdir, sshkey[1:])
|
||||
shutil.copy2(sshkey, newkey)
|
||||
finfo = os.stat(sshkey)
|
||||
os.chown(newkey, finfo[stat.ST_UID], finfo[stat.ST_GID])
|
||||
for ifcfg in glob.glob(os.path.join(rootdir, 'etc/sysconfig/network-scripts/*')):
|
||||
os.unlink(ifcfg)
|
||||
for ifcfg in glob.glob(os.path.join(rootdir, 'etc/NetworkManager/system-connections/*')):
|
||||
os.unlink(ifcfg)
|
||||
for ifcfg in glob.glob('/run/NetworkManager/system-connections/*'):
|
||||
newcfg = ifcfg.split('/')[-1]
|
||||
newcfg = os.path.join(rootdir, 'etc/NetworkManager/system-connections/{0}'.format(newcfg))
|
||||
shutil.copy2(ifcfg, newcfg)
|
||||
shutil.rmtree(os.path.join(rootdir, 'etc/confluent/'))
|
||||
shutil.copytree('/etc/confluent', os.path.join(rootdir, 'etc/confluent'))
|
||||
if policy:
|
||||
sys.stdout.write('Applying SELinux labeling...')
|
||||
sys.stdout.flush()
|
||||
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'etc')])
|
||||
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'opt')])
|
||||
sys.stdout.write('Done\n')
|
||||
sys.stdout.flush()
|
||||
for metafs in ('proc', 'sys', 'dev'):
|
||||
subprocess.check_call(['mount', '-o', 'bind', '/{}'.format(metafs), os.path.join(rootdir, metafs)])
|
||||
with open(os.path.join(rootdir, 'etc/sysconfig/grub')) as defgrubin:
|
||||
defgrub = defgrubin.read().split('\n')
|
||||
with open(os.path.join(rootdir, 'etc/sysconfig/grub'), 'w') as defgrubout:
|
||||
for gline in defgrub:
|
||||
gline = gline.split()
|
||||
newline = []
|
||||
for ent in gline:
|
||||
if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'):
|
||||
continue
|
||||
newline.append(ent)
|
||||
defgrubout.write(' '.join(newline) + '\n')
|
||||
grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/')
|
||||
subprocess.check_call(['chroot', rootdir, 'grub2-mkconfig', '-o', grubcfg])
|
||||
newroot = None
|
||||
with open('/etc/shadow') as shadowin:
|
||||
shents = shadowin.read().split('\n')
|
||||
for shent in shents:
|
||||
shent = shent.split(':')
|
||||
if not shent:
|
||||
continue
|
||||
if shent[0] == 'root' and shent[1] not in ('*', '!!', ''):
|
||||
newroot = shent[1]
|
||||
if newroot:
|
||||
shlines = None
|
||||
with open(os.path.join(rootdir, 'etc/shadow')) as oshadow:
|
||||
shlines = oshadow.read().split('\n')
|
||||
with open(os.path.join(rootdir, 'etc/shadow'), 'w') as oshadow:
|
||||
for line in shlines:
|
||||
if line.startswith('root:'):
|
||||
line = line.split(':')
|
||||
line[1] = newroot
|
||||
line = ':'.join(line)
|
||||
oshadow.write(line + '\n')
|
||||
partnum = None
|
||||
targblock = None
|
||||
for vol in vols:
|
||||
if vol['mount'] == '/boot/efi':
|
||||
targdev = vol['targetdisk']
|
||||
partnum = re.search('(\d+)$', targdev).group(1)
|
||||
targblock = re.search('(.*)\d+$', targdev).group(1)
|
||||
if targblock:
|
||||
shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
|
||||
shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
|
||||
subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
|
||||
#other network interfaces
|
||||
|
||||
|
||||
def had_swap():
|
||||
with open('/etc/fstab') as tabfile:
|
||||
tabs = tabfile.read().split('\n')
|
||||
for tab in tabs:
|
||||
tab = tab.split()
|
||||
if len(tab) < 3:
|
||||
continue
|
||||
if tab[2] == 'swap':
|
||||
return True
|
||||
return False
|
||||
|
||||
def install_to_disk(imgpath):
|
||||
lvmvols = {}
|
||||
deftotsize = 0
|
||||
mintotsize = 0
|
||||
deflvmsize = 0
|
||||
minlvmsize = 0
|
||||
biggestsize = 0
|
||||
biggestfs = None
|
||||
plainvols = {}
|
||||
allvols = []
|
||||
swapsize = 0
|
||||
if had_swap():
|
||||
with open('/proc/meminfo') as meminfo:
|
||||
swapsize = meminfo.read().split('\n')[0]
|
||||
swapsize = int(swapsize.split()[1])
|
||||
if swapsize < 2097152:
|
||||
swapsize = swapsize * 2
|
||||
elif swapsize > 8388608 and swapsize < 67108864:
|
||||
swapsize = swapsize * 0.5
|
||||
elif swapsize >= 67108864:
|
||||
swapsize = 33554432
|
||||
swapsize = int(swapsize * 1024)
|
||||
deftotsize = swapsize
|
||||
mintotsize = swapsize
|
||||
for fs in get_image_metadata(imgpath):
|
||||
allvols.append(fs)
|
||||
deftotsize += fs['initsize']
|
||||
mintotsize += fs['minsize']
|
||||
if fs['initsize'] > biggestsize:
|
||||
biggestfs = fs
|
||||
biggestsize = fs['initsize']
|
||||
if fs['device'].startswith('/dev/mapper'):
|
||||
lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs
|
||||
deflvmsize += fs['initsize']
|
||||
minlvmsize += fs['minsize']
|
||||
else:
|
||||
plainvols[int(re.search('(\d+)$', fs['device'])[0])] = fs
|
||||
with open('/tmp/installdisk') as diskin:
|
||||
instdisk = diskin.read()
|
||||
instdisk = '/dev/' + instdisk
|
||||
parted = PartedRunner(instdisk)
|
||||
dinfo = parted.run('unit s print')
|
||||
dinfo = dinfo.split('\n')
|
||||
sectors = 0
|
||||
sectorsize = 0
|
||||
for inf in dinfo:
|
||||
if inf.startswith('Disk {0}:'.format(instdisk)):
|
||||
_, sectors = inf.split(': ')
|
||||
sectors = int(sectors.replace('s', ''))
|
||||
if inf.startswith('Sector size (logical/physical):'):
|
||||
_, sectorsize = inf.split(':')
|
||||
sectorsize = sectorsize.split('/')[0]
|
||||
sectorsize = sectorsize.replace('B', '')
|
||||
sectorsize = int(sectorsize)
|
||||
# for now, only support resizing/growing the largest partition
|
||||
minexcsize = deftotsize - biggestfs['initsize']
|
||||
mintotsize = deftotsize - biggestfs['initsize'] + biggestfs['minsize']
|
||||
minsectors = mintotsize // sectorsize
|
||||
if sectors < (minsectors + 65536):
|
||||
raise Exception('Disk too small to fit image')
|
||||
biggestsectors = sectors - (minexcsize // sectorsize)
|
||||
biggestsize = sectorsize * biggestsectors
|
||||
parted.run('mklabel gpt')
|
||||
curroffset = 2048
|
||||
for volidx in sorted(plainvols):
|
||||
vol = plainvols[volidx]
|
||||
if vol is not biggestfs:
|
||||
size = vol['initsize'] // sectorsize
|
||||
else:
|
||||
size = biggestsize // sectorsize
|
||||
size += 2047 - (size % 2048)
|
||||
end = curroffset + size
|
||||
if end > sectors:
|
||||
end = sectors
|
||||
parted.run('mkpart primary {}s {}s'.format(curroffset, end))
|
||||
vol['targetdisk'] = instdisk + '{0}'.format(volidx)
|
||||
curroffset += size + 1
|
||||
if not lvmvols:
|
||||
if swapsize:
|
||||
swapsize = swapsize // sectorsize
|
||||
swapsize += 2047 - (size % 2048)
|
||||
end = curroffset + swapsize
|
||||
if end > sectors:
|
||||
end = sectors
|
||||
parted.run('mkpart swap {}s {}s'.format(curroffset, end))
|
||||
subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
|
||||
else:
|
||||
parted.run('mkpart lvm {}s 100%'.format(curroffset))
|
||||
lvmpart = instdisk + '{}'.format(volidx + 1)
|
||||
subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
|
||||
subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
|
||||
vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
|
||||
vginfo = vginfo.split('\n')
|
||||
pesize = 0
|
||||
pes = 0
|
||||
for infline in vginfo:
|
||||
infline = infline.split()
|
||||
if len(infline) >= 3 and infline[:2] == ['PE', 'Size']:
|
||||
pesize = int(infline[2])
|
||||
if len(infline) >= 5 and infline[:2] == ['Free', 'PE']:
|
||||
pes = int(infline[4])
|
||||
takeaway = swapsize // pesize
|
||||
for volidx in lvmvols:
|
||||
vol = lvmvols[volidx]
|
||||
if vol is biggestfs:
|
||||
continue
|
||||
takeaway += vol['initsize'] // pesize
|
||||
takeaway += 1
|
||||
biggestextents = pes - takeaway
|
||||
for volidx in lvmvols:
|
||||
vol = lvmvols[volidx]
|
||||
if vol is biggestfs:
|
||||
extents = biggestextents
|
||||
else:
|
||||
extents = vol['initsize'] // pesize
|
||||
extents += 1
|
||||
if vol['mount'] == '/':
|
||||
lvname = 'root'
|
||||
else:
|
||||
lvname = vol['mount'].replace('/', '_')
|
||||
subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage'])
|
||||
vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname)
|
||||
if swapsize:
|
||||
subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage'])
|
||||
subprocess.check_call(['mkswap', '/dev/localstorage/swap'])
|
||||
os.makedirs('/run/imginst/targ')
|
||||
for vol in allvols:
|
||||
with open(vol['targetdisk'], 'wb') as partition:
|
||||
partition.write(b'\x00' * 1 * 1024 * 1024)
|
||||
subprocess.check_call(['mkfs.{}'.format(vol['filesystem']), vol['targetdisk']])
|
||||
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ'])
|
||||
source = vol['mount'].replace('/', '_')
|
||||
source = '/run/imginst/sources/' + source
|
||||
blankfsstat = os.statvfs('/run/imginst/targ')
|
||||
blankused = (blankfsstat.f_blocks - blankfsstat.f_bfree) * blankfsstat.f_bsize
|
||||
sys.stdout.write('\nWriting {0}: '.format(vol['mount']))
|
||||
with subprocess.Popen(['cp', '-ax', source + '/.', '/run/imginst/targ']) as copier:
|
||||
stillrunning = copier.poll()
|
||||
lastprogress = 0.0
|
||||
while stillrunning is None:
|
||||
currfsstat = os.statvfs('/run/imginst/targ')
|
||||
currused = (currfsstat.f_blocks - currfsstat.f_bfree) * currfsstat.f_bsize
|
||||
currused -= blankused
|
||||
with open('/proc/meminfo') as meminf:
|
||||
for line in meminf.read().split('\n'):
|
||||
if line.startswith('Dirty:'):
|
||||
_, dirty, _ = line.split()
|
||||
dirty = int(dirty) * 1024
|
||||
progress = (currused - dirty) / vol['minsize']
|
||||
if progress < lastprogress:
|
||||
progress = lastprogress
|
||||
if progress > 0.99:
|
||||
progress = 0.99
|
||||
lastprogress = progress
|
||||
progress = progress * 100
|
||||
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
|
||||
sys.stdout.flush()
|
||||
time.sleep(0.5)
|
||||
stillrunning = copier.poll()
|
||||
if stillrunning != 0:
|
||||
raise Exception("Error copying volume")
|
||||
with subprocess.Popen(['sync']) as syncrun:
|
||||
stillrunning = syncrun.poll()
|
||||
while stillrunning is None:
|
||||
with open('/proc/meminfo') as meminf:
|
||||
for line in meminf.read().split('\n'):
|
||||
if line.startswith('Dirty:'):
|
||||
_, dirty, _ = line.split()
|
||||
dirty = int(dirty) * 1024
|
||||
progress = (vol['minsize'] - dirty) / vol['minsize']
|
||||
if progress < lastprogress:
|
||||
progress = lastprogress
|
||||
if progress > 0.99:
|
||||
progress = 0.99
|
||||
lastprogress = progress
|
||||
progress = progress * 100
|
||||
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
|
||||
sys.stdout.flush()
|
||||
time.sleep(0.5)
|
||||
stillrunning = syncrun.poll()
|
||||
sys.stdout.write('\x1b[1K\rDone writing {0}'.format(vol['mount']))
|
||||
sys.stdout.write('\n')
|
||||
sys.stdout.flush()
|
||||
subprocess.check_call(['umount', '/run/imginst/targ'])
|
||||
for vol in allvols:
|
||||
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ/' + vol['mount']])
|
||||
fixup('/run/imginst/targ', allvols)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
install_to_disk(os.environ['mountsrc'])
|
@ -0,0 +1,126 @@
. /lib/dracut-lib.sh
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay
if [ "untethered" = "$(getarg confluent_imagemethod)" ]; then
    mount -t tmpfs untethered /mnt/remoteimg
    curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs
else
    confluent_urls="$confluent_urls https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs"
    /opt/confluent/bin/urlmount $confluent_urls /mnt/remoteimg
fi
/opt/confluent/bin/confluent_imginfo /mnt/remoteimg/rootimg.sfs > /tmp/rootimg.info
loopdev=$(losetup -f)
export mountsrc=$loopdev
losetup -r $loopdev /mnt/remoteimg/rootimg.sfs
if grep '^Format: confluent_crypted' /tmp/rootimg.info > /dev/null; then
    while ! curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/profileprivate/pending/rootimg.key > /tmp/rootimg.key; do
        echo "Unable to retrieve private key from $confluent_mgr (verify that confluent can access /var/lib/confluent/private/$confluent_profile/pending/rootimg.key)"
        sleep 1
    done
    cipher=$(head -n 1 /tmp/rootimg.key)
    key=$(tail -n 1 /tmp/rootimg.key)
    len=$(wc -c /mnt/remoteimg/rootimg.sfs | awk '{print $1}')
    len=$(((len-4096)/512))
    dmsetup create cryptimg --table "0 $len crypt $cipher $key 0 $loopdev 8"
    /opt/confluent/bin/confluent_imginfo /dev/mapper/cryptimg > /tmp/rootimg.info
    mountsrc=/dev/mapper/cryptimg
fi

if grep '^Format: squashfs' /tmp/rootimg.info > /dev/null; then
    mount -o ro $mountsrc /mnt/remote
elif grep '^Format: confluent_multisquash' /tmp/rootimg.info; then
    tail -n +3 /tmp/rootimg.info | awk '{gsub("/", "_"); print "echo 0 " $4 " linear '$mountsrc' " $3 " | dmsetup create mproot" $7}' > /tmp/setupmount.sh
    . /tmp/setupmount.sh
    cat /tmp/setupmount.sh |awk '{printf "mount /dev/mapper/"$NF" "; sub("mproot", ""); gsub("_", "/"); print "/mnt/remote"$NF}' > /tmp/mountparts.sh
    . /tmp/mountparts.sh
fi


#mount -t tmpfs overlay /mnt/overlay
modprobe zram
memtot=$(grep ^MemTotal: /proc/meminfo|awk '{print $2}')
memtot=$((memtot/2))$(grep ^MemTotal: /proc/meminfo | awk '{print $3}')
echo $memtot > /sys/block/zram0/disksize
mkfs.xfs /dev/zram0 > /dev/null
mount -o discard /dev/zram0 /mnt/overlay
if [ ! -f /tmp/mountparts.sh ]; then
    mkdir -p /mnt/overlay/upper /mnt/overlay/work
    mount -t overlay -o upperdir=/mnt/overlay/upper,workdir=/mnt/overlay/work,lowerdir=/mnt/remote disklessroot /sysroot
else
    for srcmount in $(cat /tmp/mountparts.sh | awk '{print $3}'); do
        mkdir -p /mnt/overlay${srcmount}/upper /mnt/overlay${srcmount}/work
        mount -t overlay -o upperdir=/mnt/overlay${srcmount}/upper,workdir=/mnt/overlay${srcmount}/work,lowerdir=${srcmount} disklesspart /sysroot${srcmount#/mnt/remote}
    done
fi
mkdir -p /sysroot/etc/ssh
mkdir -p /sysroot/etc/confluent
mkdir -p /sysroot/root/.ssh
cp /root/.ssh/* /sysroot/root/.ssh
chmod 700 /sysroot/root/.ssh
cp /etc/confluent/* /sysroot/etc/confluent/
cp /etc/ssh/*key* /sysroot/etc/ssh/
for pubkey in /etc/ssh/ssh_host*key.pub; do
    certfile=${pubkey/.pub/-cert.pub}
    privfile=${pubkey%.pub}
    if [ -s $certfile ]; then
        echo HostCertificate $certfile >> /sysroot/etc/ssh/sshd_config
    fi
    echo HostKey $privfile >> /sysroot/etc/ssh/sshd_config
done

mkdir -p /sysroot/dev /sysroot/sys /sysroot/proc /sysroot/run
if [ ! -z "$autocons" ]; then
    autocons=${autocons%,*}
    mkdir -p /run/systemd/generator/getty.target.wants
    ln -s /usr/lib/systemd/system/serial-getty@.service /run/systemd/generator/getty.target.wants/serial-getty@${autocons}.service
fi
while [ ! -e /sysroot/sbin/init ]; do
    echo "Failed to access root filesystem or it is missing /sbin/init"
    echo "System should be accessible through ssh at port 2222 with the appropriate key"
    while [ ! -e /sysroot/sbin/init ]; do
        sleep 1
    done
done
rootpassword=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg)
rootpassword=${rootpassword#rootpassword: }
if [ "$rootpassword" = "null" ]; then
    rootpassword=""
fi

if [ ! -z "$rootpassword" ]; then
    sed -i "s@root:[^:]*:@root:$rootpassword:@" /sysroot/etc/shadow
fi
for i in /ssh/*.ca; do
    echo '@cert-authority *' $(cat $i) >> /sysroot/etc/ssh/ssh_known_hosts
done
echo HostbasedAuthentication yes >> /sysroot/etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /sysroot/etc/ssh/sshd_config
echo IgnoreRhosts no >> /sysroot/etc/ssh/sshd_config
sshconf=/sysroot/etc/ssh/ssh_config
if [ -d /sysroot/etc/ssh/ssh_config.d/ ]; then
    sshconf=/sysroot/etc/ssh/ssh_config.d/01-confluent.conf
fi
echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf
curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/nodelist > /sysroot/etc/ssh/shosts.equiv
cp /sysroot/etc/ssh/shosts.equiv /sysroot/root/.shosts
chmod 640 /sysroot/etc/ssh/*_key
chroot /sysroot chgrp ssh_keys /etc/ssh/*_key
cp /tls/*.pem /sysroot/etc/pki/ca-trust/source/anchors/
chroot /sysroot/ update-ca-trust
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/onboot.service > /sysroot/etc/systemd/system/onboot.service
mkdir -p /sysroot/opt/confluent/bin
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/onboot.sh > /sysroot/opt/confluent/bin/onboot.sh
chmod +x /sysroot/opt/confluent/bin/onboot.sh
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
cp /etc/confluent/functions /sysroot/etc/confluent/functions
if grep installtodisk /proc/cmdline > /dev/null; then
    . /etc/confluent/functions
    run_remote installimage
    exec reboot -f
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
exec /opt/confluent/bin/start_root
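Once the diskless node is up, the zram-backed writable layer and the overlay(s) built over the squashfs lower layer(s) above can be checked with standard tools; a hedged sketch, assuming the default device names used in this script:

zramctl /dev/zram0      # zram disk sized to half of MemTotal, formatted xfs
findmnt -t overlay      # list the assembled overlay mount(s)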
@ -0,0 +1,43 @@
#!/bin/bash
. /etc/confluent/functions
# the image will be used to deploy itself
# provide both access to image (for parsing metadata)
# and existing mounts of image (to take advantage of caching)
mount -o bind /sys /sysroot/sys
mount -o bind /dev /sysroot/dev
mount -o bind /proc /sysroot/proc
mount -o bind /run /sysroot/run


if [ ! -f /tmp/mountparts.sh ]; then
    mkdir -p /sysroot/run/imginst/sources/_
    mount -o bind /mnt/remote /sysroot/run/imginst/sources/_
else
    for srcmount in $(cat /tmp/mountparts.sh | awk '{print $2}'); do
        srcname=${srcmount#/dev/mapper/mproot}
        srcdir=$(echo $srcmount | sed -e 's!/dev/mapper/mproot!/mnt/remote!' -e 's!_!/!g')
        mkdir -p /sysroot/run/imginst/sources/$srcname
        mount -o bind $srcdir /sysroot/run/imginst/sources/$srcname
    done
fi
cd /sysroot/run
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_python getinstalldisk"
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_parts pre.d"
if [ ! -f /sysroot/tmp/installdisk ]; then
    echo 'Unable to find a suitable installation target device, ssh to port 2222 to investigate'
    while [ ! -f /sysroot/tmp/installdisk ]; do
        sleep 1
    done
fi
lvm vgchange -a n
udevadm control -e
chroot /sysroot /usr/lib/systemd/systemd-udevd --daemon
chroot /sysroot bash -c "source /etc/confluent/functions; run_remote_python image2disk.py"
echo "Port 22" >> /etc/ssh/sshd_config
echo 'Match LocalPort 22' >> /etc/ssh/sshd_config
echo ' ChrootDirectory /sysroot/run/imginst/targ' >> /etc/ssh/sshd_config
kill -HUP $(cat /run/sshd.pid)

chroot /sysroot/run/imginst/targ bash -c "source /etc/confluent/functions; run_remote post.sh"
chroot /sysroot bash -c "umount \$(tac /proc/mounts|awk '{print \$2}'|grep ^/run/imginst/targ)"
confluent_osdeploy/el7-diskless/profiles/default/scripts/onboot.d/.gitignore (new empty file, vendored)
@ -0,0 +1,11 @@
[Unit]
Description=Confluent onboot hook
Requires=network-online.target
After=network-online.target

[Service]
ExecStart=/opt/confluent/bin/onboot.sh

[Install]
WantedBy=multi-user.target
@ -0,0 +1,33 @@
#!/bin/sh

# This script is executed on each boot as it completes. It is best
# to edit the middle of the file as noted below so custom commands
# are executed before the script finishes.

nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-onboot.log
exec 2>> /var/log/confluent/confluent-onboot.log
chmod 600 /var/log/confluent/confluent-onboot.log
tail -f /var/log/confluent/confluent-onboot.log > /dev/console &
logshowpid=$!

run_remote_python syncfileclient
run_remote_python confignet

run_remote onboot.custom
# onboot scripts may be placed into onboot.d, e.g. onboot.d/01-firstaction.sh, onboot.d/02-secondaction.sh
run_remote_parts onboot.d

# Induce execution of remote configuration, e.g. ansible plays in ansible/onboot.d/
run_remote_config onboot.d

#curl -X POST -d 'status: booted' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
kill $logshowpid
confluent_osdeploy/el7-diskless/profiles/default/scripts/post.d/.gitignore (new empty file, vendored)
@ -0,0 +1,39 @@
#!/bin/sh

# This script is executed 'chrooted' into a cloned disk target before rebooting
#

nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-post.log
exec 2>> /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
tail -f /var/log/confluent/confluent-post.log > /dev/console &
logshowpid=$!
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
mkdir -p /opt/confluent/bin
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
systemctl enable firstboot
selinuxpolicy=$(grep ^SELINUXTYPE /etc/selinux/config |awk -F= '{print $2}')
if [ ! -z "$selinuxpolicy" ]; then
    setfiles /etc/selinux/${selinuxpolicy}/contexts/files/file_contexts /etc/
fi
run_remote_python syncfileclient
run_remote post.custom
# post scripts may be placed into post.d, e.g. post.d/01-firstaction.sh, post.d/02-secondaction.sh
run_remote_parts post.d

# Induce execution of remote configuration, e.g. ansible plays in ansible/post.d/
run_remote_config post.d

curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus

kill $logshowpid
@ -0,0 +1,272 @@
|
||||
#!/usr/bin/python
|
||||
import importlib
|
||||
import tempfile
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import pwd
|
||||
import grp
|
||||
from importlib.machinery import SourceFileLoader
|
||||
try:
|
||||
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
|
||||
except FileNotFoundError:
|
||||
apiclient = SourceFileLoader('apiclient', '/etc/confluent/apiclient').load_module()
|
||||
|
||||
|
||||
def partitionhostsline(line):
|
||||
comment = ''
|
||||
try:
|
||||
cmdidx = line.index('#')
|
||||
comment = line[cmdidx:]
|
||||
line = line[:cmdidx].strip()
|
||||
except ValueError:
|
||||
pass
|
||||
if not line:
|
||||
return '', [], comment
|
||||
ipaddr, names = line.split(maxsplit=1)
|
||||
names = names.split()
|
||||
return ipaddr, names, comment
|
||||
|
||||
class HostMerger(object):
|
||||
def __init__(self):
|
||||
self.byip = {}
|
||||
self.byname = {}
|
||||
self.sourcelines = []
|
||||
self.targlines = []
|
||||
|
||||
def read_source(self, sourcefile):
|
||||
with open(sourcefile, 'r') as hfile:
|
||||
self.sourcelines = hfile.read().split('\n')
|
||||
while not self.sourcelines[-1]:
|
||||
self.sourcelines = self.sourcelines[:-1]
|
||||
for x in range(len(self.sourcelines)):
|
||||
line = self.sourcelines[x]
|
||||
currip, names, comment = partitionhostsline(line)
|
||||
if currip:
|
||||
self.byip[currip] = x
|
||||
for name in names:
|
||||
self.byname[name] = x
|
||||
|
||||
def read_target(self, targetfile):
|
||||
with open(targetfile, 'r') as hfile:
|
||||
lines = hfile.read().split('\n')
|
||||
if not lines[-1]:
|
||||
lines = lines[:-1]
|
||||
for y in range(len(lines)):
|
||||
line = lines[y]
|
||||
currip, names, comment = partitionhostsline(line)
|
||||
if currip in self.byip:
|
||||
x = self.byip[currip]
|
||||
if self.sourcelines[x] is None:
|
||||
# have already consumed this entry
|
||||
continue
|
||||
self.targlines.append(self.sourcelines[x])
|
||||
self.sourcelines[x] = None
|
||||
continue
|
||||
for name in names:
|
||||
if name in self.byname:
|
||||
x = self.byname[name]
|
||||
if self.sourcelines[x] is None:
|
||||
break
|
||||
self.targlines.append(self.sourcelines[x])
|
||||
self.sourcelines[x] = None
|
||||
break
|
||||
else:
|
||||
self.targlines.append(line)
|
||||
|
||||
def write_out(self, targetfile):
|
||||
while not self.targlines[-1]:
|
||||
self.targlines = self.targlines[:-1]
|
||||
if not self.targlines:
|
||||
break
|
||||
while not self.sourcelines[-1]:
|
||||
self.sourcelines = self.sourcelines[:-1]
|
||||
if not self.sourcelines:
|
||||
break
|
||||
with open(targetfile, 'w') as hosts:
|
||||
for line in self.targlines:
|
||||
hosts.write(line + '\n')
|
||||
for line in self.sourcelines:
|
||||
if line is not None:
|
||||
hosts.write(line + '\n')
|
||||
|
||||
|
||||
class CredMerger:
|
||||
def __init__(self):
|
||||
try:
|
||||
with open('/etc/login.defs', 'r') as ldefs:
|
||||
defs = ldefs.read().split('\n')
|
||||
except FileNotFoundError:
|
||||
defs = []
|
||||
lkup = {}
|
||||
self.discardnames = {}
|
||||
self.shadowednames = {}
|
||||
for line in defs:
|
||||
try:
|
||||
line = line[:line.index('#')]
|
||||
except ValueError:
|
||||
pass
|
||||
keyval = line.split()
|
||||
if len(keyval) < 2:
|
||||
continue
|
||||
lkup[keyval[0]] = keyval[1]
|
||||
self.uidmin = int(lkup.get('UID_MIN', 1000))
|
||||
self.uidmax = int(lkup.get('UID_MAX', 60000))
|
||||
self.gidmin = int(lkup.get('GID_MIN', 1000))
|
||||
self.gidmax = int(lkup.get('GID_MAX', 60000))
|
||||
self.shadowlines = None
|
||||
|
||||
    def read_passwd(self, source, targfile=False):
        self.read_generic(source, self.uidmin, self.uidmax, targfile)

    def read_group(self, source, targfile=False):
        self.read_generic(source, self.gidmin, self.gidmax, targfile)

    def read_generic(self, source, minid, maxid, targfile):
        if targfile:
            self.targdata = []
        else:
            self.sourcedata = []
        with open(source, 'r') as inputfile:
            for line in inputfile.read().split('\n'):
                try:
                    name, _, uid, _ = line.split(':', 3)
                    uid = int(uid)
                except ValueError:
                    continue
                if targfile:
                    if uid < minid or uid > maxid:
                        self.targdata.append(line)
                    else:
                        self.discardnames[name] = 1
                else:
                    if name[0] in ('+', '#', '@'):
                        self.sourcedata.append(line)
                    elif uid >= minid and uid <= maxid:
                        self.sourcedata.append(line)

    def read_shadow(self, source):
        self.shadowlines = []
        try:
            with open(source, 'r') as inshadow:
                for line in inshadow.read().split('\n'):
                    try:
                        name, _ = line.split(':', 1)
                    except ValueError:
                        continue
                    if name in self.discardnames:
                        continue
                    self.shadowednames[name] = 1
                    self.shadowlines.append(line)
        except FileNotFoundError:
            return

    def write_out(self, outfile):
        with open(outfile, 'w') as targ:
            for line in self.targdata:
                targ.write(line + '\n')
            for line in self.sourcedata:
                targ.write(line + '\n')
        if outfile == '/etc/passwd':
            if self.shadowlines is None:
                self.read_shadow('/etc/shadow')
            with open('/etc/shadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name[0] in ('+', '#', '@'):
                        continue
                    if name in self.shadowednames:
                        continue
                    # newly merged users get a disabled-password shadow entry
                    shadout.write(name + ':!:::::::\n')
        if outfile == '/etc/group':
            if self.shadowlines is None:
                self.read_shadow('/etc/gshadow')
            with open('/etc/gshadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!::\n')

def appendonce(basepath, filename):
    # Append the staged file's contents to its target path, but only if the
    # target does not already contain that exact data.
    with open(filename, 'rb') as filehdl:
        thedata = filehdl.read()
    targname = filename.replace(basepath, '')
    try:
        with open(targname, 'rb') as filehdl:
            targdata = filehdl.read()
    except IOError:
        targdata = b''
    if thedata in targdata:
        return
    with open(targname, 'ab') as targhdl:
        targhdl.write(thedata)

def synchronize():
    tmpdir = tempfile.mkdtemp()
    appendoncedir = tempfile.mkdtemp()
    try:
        ac = apiclient.HTTPSClient()
        data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir})
        status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
        if status == 202:
            lastrsp = ''
            while status != 204:
                status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
                if not isinstance(rsp, str):
                    rsp = rsp.decode('utf8')
                if status == 200:
                    lastrsp = rsp
            # Merge any staged passwd/group/hosts content into the running system
            pendpasswd = os.path.join(tmpdir, 'etc/passwd')
            if os.path.exists(pendpasswd):
                cm = CredMerger()
                cm.read_passwd(pendpasswd, targfile=False)
                cm.read_passwd('/etc/passwd', targfile=True)
                cm.write_out('/etc/passwd')
            pendgroup = os.path.join(tmpdir, 'etc/group')
            if os.path.exists(pendgroup):
                cm = CredMerger()
                cm.read_group(pendgroup, targfile=False)
                cm.read_group('/etc/group', targfile=True)
                cm.write_out('/etc/group')
            pendhosts = os.path.join(tmpdir, 'etc/hosts')
            if os.path.exists(pendhosts):
                cm = HostMerger()
                cm.read_source(pendhosts)
                cm.read_target('/etc/hosts')
                cm.write_out('/etc/hosts')
            for dirn in os.walk(appendoncedir):
                for filen in dirn[2]:
                    appendonce(appendoncedir, os.path.join(dirn[0], filen))
            # Apply any owner/group/permissions options reported by the server
            if lastrsp:
                lastrsp = json.loads(lastrsp)
                opts = lastrsp.get('options', {})
                for fname in opts:
                    uid = -1
                    gid = -1
                    for opt in opts[fname]:
                        if opt == 'owner':
                            try:
                                uid = pwd.getpwnam(opts[fname][opt]['name']).pw_uid
                            except KeyError:
                                uid = opts[fname][opt]['id']
                        elif opt == 'group':
                            try:
                                gid = grp.getgrnam(opts[fname][opt]['name']).gr_gid
                            except KeyError:
                                gid = opts[fname][opt]['id']
                        elif opt == 'permissions':
                            os.chmod(fname, int(opts[fname][opt], 8))
                    if uid != -1 or gid != -1:
                        os.chown(fname, uid, gid)
    finally:
        shutil.rmtree(tmpdir)
        shutil.rmtree(appendoncedir)


if __name__ == '__main__':
    synchronize()
confluent_osdeploy/el7-diskless/profiles/default/syncfiles (new file, 29 lines)
@@ -0,0 +1,29 @@
# It is advised to avoid /var/lib/confluent/public as a source for syncing. /var/lib/confluent/public
# is served without authentication and thus any sensitive content would be a risk. If wanting to host
# syncfiles on a common share, it is suggested to have /var/lib/confluent be the share and use a
# subdirectory other than public.
#
# Syncing is performed as the 'confluent' user, so all source files must be accessible by the confluent user.
#
# This file lists files to synchronize or merge to the deployed systems from the deployment server.
# To take /some/path/hosts on the deployment server and duplicate it to /etc/hosts, note
# particularly the use of '->' to delineate source from target:
# /some/path/hosts -> /etc/hosts

# If wanting to simply use the same path for source and destination, the -> may be skipped:
# /etc/hosts

# More functionality is available, for example to limit an entry to run only on n1 through n8, and to set
# owner, group, and permissions (in octal notation):
# /example/source -> n1-n8:/etc/target (owner=root,group=root,permissions=600)

# Entries under APPENDONCE: will be added to the specified target, but only if the target does not already
# contain the source data in its entirety. This allows appending in a fashion that is friendly to being
# run repeatedly.

# Entries under MERGE: will attempt to be intelligently merged. This supports /etc/group and /etc/passwd.
# Any supporting entries in /etc/shadow or /etc/gshadow are added automatically, with the password disabled.
# It also will not inject 'system' ids (under 1000, usually), as those tend to be local and rpm-managed.
MERGE:
# /etc/passwd
# /etc/group
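As an illustration of the syntax described above (a sketch, not content shipped in this profile), a filled-in syncfiles might look like the following; the source paths and the n1-n8 noderange are hypothetical placeholders:

/var/lib/confluent/sync/hosts -> /etc/hosts
/var/lib/confluent/sync/issue -> n1-n8:/etc/issue (owner=root,group=root,permissions=644)
APPENDONCE:
/var/lib/confluent/sync/authorized_keys -> /root/.ssh/authorized_keys
MERGE:
/etc/passwd
/etc/group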
@@ -17,4 +17,4 @@ xfsprogs
 e2fsprogs
 fuse-libs
 libnl3
-chrony kernel net-tools nfs-utils openssh-server rsync tar util-linux python3 tar dracut dracut-network ethtool parted openssl dhclient openssh-clients bash vim-minimal rpm iputils clevis lvm2 efibootmgr shim-x64.x86_64 grub2-efi-x64 attr
+chrony kernel net-tools nfs-utils openssh-server rsync tar util-linux python3 tar dracut dracut-network ethtool parted openssl dhclient openssh-clients bash vim-minimal rpm iputils lvm2 efibootmgr shim-x64.x86_64 grub2-efi-x64 attr
@@ -340,7 +340,10 @@ def create_yumconf(sourcedir, addrepos):
     if '/' not in sourcedir:
         sourcedir = os.path.join('/var/lib/confluent/distributions', sourcedir)
     if os.path.exists(sourcedir + '/repodata'):
-        pass
+        yumconf.write('[genimage-topdir]\n')
+        yumconf.write('name=Local install repository\n')
+        yumconf.write('baseurl={0}\n'.format(sourcedir))
+        yumconf.write('enabled=1\ngpgcheck=0\n\n')
     else:
         c = configparser.ConfigParser()
         c.read(sourcedir + '/.treeinfo')
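For reference, a sketch of the repository section those added yumconf.write calls emit, assuming a hypothetical sourcedir of /var/lib/confluent/distributions/el7:

[genimage-topdir]
name=Local install repository
baseurl=/var/lib/confluent/distributions/el7
enabled=1
gpgcheck=0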
@@ -548,7 +551,7 @@ class DebHandler(OsHandler):
 
 class ElHandler(OsHandler):
     def __init__(self, name, version, arch, args):
-        self.oscategory = 'el8'
+        self.oscategory = 'el{0}'.format(version.split('.')[0])
         self.yumargs = []
         super().__init__(name, version, arch, args)
 
@@ -876,7 +879,7 @@ def fingerprint_source_suse(files, sourcepath, args):
 
 def fingerprint_source_el(files, sourcepath, args):
     for filen in files:
-        if '-release-8' in filen:
+        if '-release-8' in filen or '-release-7' in filen:
             parts = filen.split('-')
             osname = '_'.join(parts[:-3])
             if osname == 'centos_linux':
@@ -908,7 +911,7 @@ def fingerprint_host_el(args, hostpath='/'):
     ts = rpm.TransactionSet(hostpath)
     rpms = ts.dbMatch('provides', 'system-release')
     for inf in rpms:
-        if 'el8' not in inf.release:
+        if 'el8' not in inf.release and 'el7' not in inf.release:
             continue
         osname = inf.name.replace('-release', '').replace('-', '_')
         if osname == 'centos_linux':