
Merge branch 'master' into remote_discovery

This commit is contained in:
Jarrod Johnson 2022-06-21 14:56:56 -04:00
commit 559a7ca7b8
56 changed files with 2256 additions and 112 deletions

View File

@ -21,17 +21,17 @@ def create_image(directory, image, label=None):
currsz = (currsz // 512 +1) * 512
datasz += currsz
datasz += ents * 32768
datasz = datasz // 512 + 1
datasz = datasz // 4096 + 1
with open(image, 'wb') as imgfile:
imgfile.seek(datasz * 512 - 1)
imgfile.seek(datasz * 4096 - 1)
imgfile.write(b'\x00')
if label:
subprocess.check_call(['mformat', '-i', image, '-v', label,
'-r', '16', '-d', '1', '-t', str(datasz),
'-s', '1','-h', '1', '::'])
'-s', '4','-h', '2', '::'])
else:
subprocess.check_call(['mformat', '-i', image, '-r', '16', '-d', '1', '-t',
str(datasz), '-s', '1','-h', '1', '::'])
str(datasz), '-s', '4','-h', '2', '::'])
# Some clustered filesystems will have the lock from mformat
# linger after close (mformat doesn't unlock)
# do a blocking wait for shared lock and then explicitly
@ -58,4 +58,4 @@ if __name__ == '__main__':
label = None
if len(sys.argv) > 3:
label = sys.argv[3]
create_image(sys.argv[1], sys.argv[2], label)
create_image(sys.argv[1], sys.argv[2], label)
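The hunk above moves the boot image sizing from 512-byte to 4096-byte units and changes the mformat geometry to 4 sectors and 2 heads, so one mformat "track" (4 * 2 * 512 bytes) equals the 4096-byte unit that datasz is counted in and that the sparse seek uses. A standalone sketch of that arithmetic, with illustrative file sizes, not part of the patch:

def estimate_tracks(file_sizes, entries):
    # Mirror the patched math: round each file up to 512-byte sectors,
    # add per-entry directory overhead, then count whole 4096-byte units.
    # With "-s 4 -h 2", one track = 4 sectors * 2 heads * 512 bytes = 4096.
    datasz = 0
    for currsz in file_sizes:
        datasz += (currsz // 512 + 1) * 512
    datasz += entries * 32768
    return datasz // 4096 + 1

tracks = estimate_tracks([1500000, 4096], entries=2)
print(tracks, 'tracks ->', tracks * 4096, 'byte image')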

View File

@ -289,13 +289,11 @@ else:
for path in queryparms:
if options.comparedefault:
continue
rc = client.print_attrib_path(path, session, list(queryparms[path]),
rcode |= client.print_attrib_path(path, session, list(queryparms[path]),
NullOpt(), queryparms[path])
if rc:
sys.exit(rc)
if printsys == 'all' or printextbmc or printbmc or printallbmc:
if printbmc or not printextbmc:
rcode = client.print_attrib_path(
rcode |= client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/all'.format(noderange),
session, printbmc, options, attrprefix='bmc.')
if options.extra:
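The hunk above stops exiting as soon as one attribute path fails and instead ORs every return code into rcode, so all requested paths are still printed and the command exits nonzero at the end if any of them failed. A standalone sketch of the pattern, with illustrative paths and a stand-in for the client call:

import sys

def print_attrib(path):
    # stand-in for client.print_attrib_path; pretend one path fails
    return 1 if path == 'power/state' else 0

rcode = 0
for path in ('console/log', 'power/state', 'health/hardware'):
    rcode |= print_attrib(path)   # remember any failure, keep going
sys.exit(rcode)                   # nonzero if anything above failed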

View File

@ -43,7 +43,7 @@ def run():
argparser.add_option('-f', '-c', '--count', type='int', default=168,
help='Number of commands to run at a time')
argparser.add_option('-s', '--substitutename',
help='Use a different name other than the nodename for ping')
help='Use a different name other than the nodename for ping; with {}, the entire name is evaluated as an expression, otherwise it is used as a suffix')
# among other things, FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind for the unaware on memory pressure and such...
(options, args) = argparser.parse_args()
@ -83,9 +83,9 @@ def run():
cmdv = ['ping', '-c', '1', '-W', '1', pingnode]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(pingnode, cmdv, all, pipedesc)
else:
pendingexecs.append((node, cmdv))
pendingexecs.append((pingnode, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)

View File

@ -35,9 +35,12 @@ if path.startswith('/opt'):
import confluent.client as client
import confluent.sortutil as sortutil
devnull = None
def run():
global devnull
devnull = open(os.devnull, 'rb')
argparser = optparse.OptionParser(
usage="Usage: %prog [options] <noderange> <command expression>",
epilog="Expressions are the same as in attributes, e.g. "
@ -133,7 +136,7 @@ def run():
def run_cmdv(node, cmdv, all, pipedesc):
try:
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
if e.errno == 2:
sys.stderr.write('{0}: Unable to find local executable file "{1}"'.format(node, cmdv[0]))

View File

@ -34,10 +34,11 @@ if path.startswith('/opt'):
import confluent.client as client
import confluent.sortutil as sortutil
devnull = None
def run():
global devnull
devnull = open(os.devnull, 'rb')
argparser = optparse.OptionParser(
usage="Usage: %prog [options] noderange commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
@ -172,7 +173,7 @@ def run():
def run_cmdv(node, cmdv, all, pipedesc):
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
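The hunks above hand each child a shared /dev/null handle for stdin, so a spawned command can never block waiting for input from, or read from, the invoking terminal. A minimal standalone sketch of the pattern; the command is illustrative:

import os
import subprocess

devnull = open(os.devnull, 'rb')   # opened once, reused for every child
proc = subprocess.Popen(['cat'], stdin=devnull,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()      # cat sees immediate EOF instead of hanging
print(proc.returncode, out, err)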

View File

@ -19,8 +19,12 @@ import array
import ctypes
import ctypes.util
import confluent.tlv as tlv
import eventlet.green.socket as socket
import eventlet.green.select as select
try:
import eventlet.green.socket as socket
import eventlet.green.select as select
except ImportError:
import socket
import select
from datetime import datetime
import json
import os
@ -240,8 +244,12 @@ def recv(handle):
cdata = cmsgarr[CMSG_LEN(0).value:]
data = rawbuffer[:i]
if cmsg.cmsg_level == socket.SOL_SOCKET and cmsg.cmsg_type == SCM_RIGHTS:
filehandles.fromstring(bytes(
cdata[:len(cdata) - len(cdata) % filehandles.itemsize]))
try:
filehandles.fromstring(bytes(
cdata[:len(cdata) - len(cdata) % filehandles.itemsize]))
except AttributeError:
filehandles.frombytes(bytes(
cdata[:len(cdata) - len(cdata) % filehandles.itemsize]))
data = json.loads(bytes(data))
return ClientFile(data['filename'], data['mode'], filehandles[0])
else:

View File

@ -24,25 +24,17 @@ a confluent server.
%setup -n %{name}-%{version} -n %{name}-%{version}
%build
%if "%{dist}" == ".el8"
python3 setup.py build
%else
%if "%{dist}" == ".el9"
python3 setup.py build
%else
%if "%{dist}" == ".el7"
python2 setup.py build
%endif
%else
python3 setup.py build
%endif
%install
%if "%{dist}" == ".el8"
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES --install-scripts=/opt/confluent/bin --install-purelib=/opt/confluent/lib/python
%else
%if "%{dist}" == ".el9"
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES --install-scripts=/opt/confluent/bin --install-purelib=/opt/confluent/lib/python
%else
%if "%{dist}" == ".el7"
python2 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES --install-scripts=/opt/confluent/bin --install-purelib=/opt/confluent/lib/python
%endif
%else
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES --install-scripts=/opt/confluent/bin --install-purelib=/opt/confluent/lib/python
%endif
@ -52,3 +44,4 @@ rm -rf $RPM_BUILD_ROOT
%files -f INSTALLED_FILES
%license /opt/confluent/share/licenses/confluent_client/LICENSE
%defattr(-,root,root)
/opt/confluent

View File

@ -22,6 +22,11 @@ a new session.
Use tmux to arrange consoles of the given noderange into a tiled layout on
the terminal screen
* `-l`, `--log`:
Perform a log replay of the current, local log in /var/log/confluent/consoles.
If in collective mode, this currently only makes sense to use on the current
collective manager.
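For example (node name illustrative), `# nodeconsole -l n1` replays the locally logged console output for n1 from /var/log/confluent/consoles.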
## ESCAPE SEQUENCE COMMANDS
While connected to a console, a number of commands may be performed through escape

View File

@ -14,7 +14,9 @@ It can also be used with the `-s` flag to change the ping location to something
* `-h`, `--help`:
Show help message and exit
* `-s` SUBSTITUTENAME, --substitutename=SUBSTITUTENAME
Use a different name other than the nodename for ping
Use a different name other than the nodename for ping. This may be an
expression, such as {bmc}, or, if no { character is present, it is treated as a suffix; `-s -eth1` would make n1 become n1-eth1, for example.
## EXAMPLES
* Pinging a node :
@ -31,6 +33,13 @@ It can also be used with the `-s` flag to change the ping location to something
`# nodeping -s {bmc} <noderange>`
` Node-bmc : ping`
* Pinging by specifying a suffix:
`# nodeping d1-d4 -s -eth1`
`d2-eth1: no_ping`
`d1-eth1: no_ping`
`d3-eth1: no_ping`
`d4-eth1: no_ping`
* Fail to ping node:
`# nodeping <node>`
`node : no_ping`

View File

@ -132,8 +132,11 @@ def scan_confluents():
current['mgtiface'] = line.replace(b'MGTIFACE: ', b'').strip().decode('utf8')
if len(peer) > 2:
current['myidx'] = peer[-1]
srvs[peer[0]] = current
srvlist.append(peer[0])
currip = peer[0]
if currip.startswith('fe80::') and '%' not in currip:
currip = '{0}%{1}'.format(currip, peer[-1])
srvs[currip] = current
srvlist.append(currip)
r = select.select((s4, s6), (), (), 2)
if r:
r = r[0]
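The hunk above appends the interface index to link-local responders (for example fe80::1%2), since an IPv6 link-local address is only usable together with the interface it was learned on. A standalone sketch of the same formatting and why the scope matters; the address and index are illustrative:

import socket

currip, ifidx = 'fe80::1', '2'
if currip.startswith('fe80::') and '%' not in currip:
    currip = '{0}%{1}'.format(currip, ifidx)
# the %scope lets getaddrinfo()/connect() fill in the interface (sin6_scope_id)
print(socket.getaddrinfo(currip, 443, socket.AF_INET6, socket.SOCK_STREAM)[0][4])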

View File

@ -29,7 +29,6 @@ cp confluent_imginfo copernicus clortho autocons ../opt/confluent/bin
cp start_root urlmount ../stateless-bin/
cd ..
ln -s el8 el9
ln -s el8-diskless el9-diskless
for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 coreos el9; do
mkdir ${os}out
cd ${os}out

View File

@ -12,7 +12,7 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
cd $tmnt
deploysrvs=$(sed -n '/^deploy_servers:/, /^[^-]/p' cnflnt.yml |grep ^-|sed -e 's/^- //'|grep -v :)
nodename=$(grep ^nodename: cnflnt.yml|awk '{print $2}')
sed -n '/^net_cfgs:/, /^[^- ]/p' cnflnt.yml |grep '^[ -]'|sed -n '/^-/, /^-/p'|head -n -1 | sed -e 's/^[- ]*//'> $tcfg
sed -n '/^net_cfgs:/, /^[^- ]/{/^[^- ]/!p}' cnflnt.yml |sed -n '/^-/, /^-/{/^-/!p}'| sed -e 's/^[- ]*//'> $tcfg
autoconfigmethod=$(grep ^ipv4_method: $tcfg)
autoconfigmethod=${autoconfigmethod#ipv4_method: }
if [ "$autoconfigmethod" = "dhcp" ]; then
@ -66,6 +66,7 @@ while ! grep ^NODE /etc/confluent/confluent.info; do
if ! grep ^NODE /etc/confluent/confluent.info; then
echo 'Current net config:' > /dev/console
ip -br a > /dev/console
exit 1
fi
done
echo "Found confluent deployment services on local network" > /dev/console

View File

@ -20,3 +20,4 @@ for i in /ssh/*.ca; do
done
mkdir -p /sysroot/opt/confluent/bin
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
cp /opt/confluent/bin/apiclient /sysroot/etc/confluent/

View File

@ -79,5 +79,9 @@ if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then
echo ignoredisk --only-use $(cat /tmp/installdisk) >> /tmp/partitioning
echo autopart --nohome $LUKSPARTY >> /tmp/partitioning
fi
python /etc/confluent/apiclient /confluent-public/os/$confluent_profile/kickstart.custom -o /tmp/kickstart.custom
if [ -f /opt/confluent/bin/apiclient ]; then
python /opt/confluent/bin/apiclient /confluent-public/os/$confluent_profile/kickstart.custom -o /tmp/kickstart.custom
else
python /etc/confluent/apiclient /confluent-public/os/$confluent_profile/kickstart.custom -o /tmp/kickstart.custom
fi
kill $logshowpid

View File

@ -10,6 +10,8 @@
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
export confluent_mgr confluent_profile nodename
cp -a /etc/confluent /mnt/sysimage/etc
mkdir -p /mnt/sysimage/opt/confluent/bin
cp /opt/confluent/bin/apiclient /mnt/sysimage/opt/confluent/bin/
chmod -R og-rwx /mnt/sysimage/etc/confluent
cp /tmp/functions /mnt/sysimage/etc/confluent/
hostnamectl set-hostname $nodename

View File

@ -3,9 +3,13 @@ try:
except ImportError:
import ConfigParser as configparser
import cStringIO
import imp
import importlib.util
import importlib.machinery
import sys
apiclient = imp.load_source('apiclient', '/opt/confluent/bin/apiclient')
modloader = importlib.machinery.SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient')
modspec = importlib.util.spec_from_file_location('apiclient', '/opt/confluent/bin/apiclient', loader=modloader)
apiclient = importlib.util.module_from_spec(modspec)
modspec.loader.exec_module(apiclient)
repo = None
server = None
profile = None

View File

@ -42,11 +42,20 @@ echo lang $locale > /tmp/langinfo
echo keyboard --vckeymap=$keymap >> /tmp/langinfo
tz=$(grep ^timezone: /etc/confluent/confluent.deploycfg)
tz=${tz#timezone: }
MVER=$(grep VERSION_ID /etc/os-release|cut -d = -f 2 |cut -d . -f 1|cut -d '"' -f 2)
ntpsrvs=""
if grep ^ntpservers: /etc/confluent/confluent.deploycfg > /dev/null; then
ntpsrvs="--ntpservers="$(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/confluent.deploycfg|sed 1d|sed '$d' | sed -e 's/^- //' | paste -sd,)
if [ "$MVER" -ge 9 ]; then
if grep ^ntpservers: /etc/confluent/confluent.deploycfg > /dev/null; then
for ntpsrv in $(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/confluent.deploycfg|sed 1d|sed '$d' | sed -e 's/^- //'); do
echo timesource --ntp-server $ntpsrv >> /tmp/timezone
done
fi
else
if grep ^ntpservers: /etc/confluent/confluent.deploycfg > /dev/null; then
ntpsrvs="--ntpservers="$(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/confluent.deploycfg|sed 1d|sed '$d' | sed -e 's/^- //' | paste -sd,)
fi
fi
echo timezone $ntpsrvs $tz --utc > /tmp/timezone
echo timezone $ntpsrvs $tz --utc >> /tmp/timezone
rootpw=$(grep ^rootpassword /etc/confluent/confluent.deploycfg | awk '{print $2}')
if [ "$rootpw" = null ]; then
echo "rootpw --lock" > /tmp/rootpw

View File

@ -0,0 +1,254 @@
get_remote_apikey() {
while [ -z "$confluent_apikey" ]; do
/opt/confluent/bin/clortho $nodename $confluent_mgr > /etc/confluent/confluent.apikey
if grep ^SEALED: /etc/confluent/confluent.apikey > /dev/null; then
# we don't support remote sealed api keys anymore
echo > /etc/confluent/confluent.apikey
fi
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
if [ -z "$confluent_apikey" ]; then
echo "Unable to acquire node api key, set deployment.apiarmed=once on node '$nodename', retrying..."
if [ ! -z "$autoconsdev" ]; then echo "Unable to acquire node api key, set deployment.apiarmed=once on node '$nodename', retrying..." > $autoconsdev; fi
sleep 10
elif [ -c /dev/tpmrm0 ]; then
tmpdir=$(mktemp -d)
cd $tmpdir
tpm2_startauthsession --session=session.ctx
tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
tpm2_createprimary -G ecc -Q --key-context=prim.ctx
(echo -n "CONFLUENT_APIKEY:";cat /etc/confluent/confluent.apikey) | tpm2_create -Q --policy=pcr15.sha256.policy --public=data.pub --private=data.priv -i - -C prim.ctx
tpm2_load -Q --parent-context=prim.ctx --public=data.pub --private=data.priv --name=confluent.apikey --key-context=data.ctx
tpm2_evictcontrol -Q -c data.ctx
tpm2_flushcontext session.ctx
cd - > /dev/null
rm -rf $tmpdir
fi
done
}
root=1
rootok=1
netroot=confluent
#clear
mkdir -p /etc/ssh
mkdir -p /var/tmp/
mkdir -p /var/empty/sshd
mkdir -p /usr/share/empty.sshd
mkdir -p /etc/confluent
sed -i '/^root:x/d' /etc/passwd
echo root:x:0:0::/:/bin/bash >> /etc/passwd
echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd
if ! grep console= /proc/cmdline >& /dev/null; then
autocons=$(/opt/confluent/bin/autocons)
autoconsdev=${autocons%,*}
autocons=${autocons##*/}
echo "Automatic console configured for $autocons"
fi
echo "Initializing confluent diskless environment"
echo -n "udevd: "
/usr/lib/systemd/systemd-udevd --daemon
echo -n "Loading drivers..."
udevadm trigger
udevadm trigger --type=devices --action=add
udevadm settle
modprobe ib_ipoib
modprobe ib_umad
modprobe hfi1
modprobe mlx5_ib
echo "done"
cat > /etc/ssh/sshd_config << EOF
Port 2222
Subsystem sftp /usr/libexec/openssh/sftp-server
PermitRootLogin yes
AuthorizedKeysFile .ssh/authorized_keys
EOF
mkdir /root/.ssh
mkdir /.ssh
cat /ssh/*pubkey > /root/.ssh/authorized_keys 2>/dev/null
cp /root/.ssh/authorized_keys /.ssh/
cat /tls/*.pem > /etc/confluent/ca.pem
mkdir -p /etc/pki/tls/certs
cat /tls/*.pem > /etc/pki/tls/certs/ca-bundle.crt
TRIES=0
oldumask=$(umask)
umask 0077
tpmdir=$(mktemp -d)
cd $tpmdir
lasthdl=""
if [ -c /dev/tpmrm0 ]; then
for hdl in $(tpm2_getcap handles-persistent|awk '{print $2}'); do
tpm2_startauthsession --policy-session --session=session.ctx
tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
unsealeddata=$(tpm2_unseal --auth=session:session.ctx -Q -c $hdl 2>/dev/null)
tpm2_flushcontext session.ctx
if [[ $unsealeddata == "CONFLUENT_APIKEY:"* ]]; then
confluent_apikey=${unsealeddata#CONFLUENT_APIKEY:}
echo $confluent_apikey > /etc/confluent/confluent.apikey
if [ -n "$lasthdl" ]; then
tpm2_evictcontrol -c $lasthdl
fi
lasthdl=$hdl
fi
done
fi
cd - > /dev/null
rm -rf $tpmdir
touch /etc/confluent/confluent.info
cd /sys/class/net
echo -n "Scanning for network configuration..."
while ! grep ^EXTMGRINFO: /etc/confluent/confluent.info | awk -F'|' '{print $3}' | grep 1 >& /dev/null && [ "$TRIES" -lt 30 ]; do
TRIES=$((TRIES + 1))
for i in *; do
ip link set $i up
done
/opt/confluent/bin/copernicus -t > /etc/confluent/confluent.info
done
cd /
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
hostname $nodename
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | head -n 1 | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
if [ -z "$confluent_mgr" ]; then
confluent_mgr=$(grep ^MANAGER: /etc/confluent/confluent.info|head -n 1 | awk '{print $2}')
fi
if [[ $confluent_mgr == *%* ]]; then
echo $confluent_mgr | awk -F% '{print $2}' > /tmp/confluent.ifidx
ifidx=$(cat /tmp/confluent.ifidx)
ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
ifname=${ifname%:}
fi
ready=0
while [ $ready = "0" ]; do
get_remote_apikey
if [[ $confluent_mgr == *:* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
tmperr=$(mktemp)
curl -sSf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/deploycfg > /etc/confluent/confluent.deploycfg 2> $tmperr
if grep 401 $tmperr > /dev/null; then
confluent_apikey=""
if [ -n "$lasthdl" ]; then
tpm2_evictcontrol -c $lasthdl
fi
confluent_mgr=${confluent_mgr#[}
confluent_mgr=${confluent_mgr%]}
elif grep 'SSL' $tmperr > /dev/null; then
confluent_mgr=${confluent_mgr#[}
confluent_mgr=${confluent_mgr%]}
echo 'Failure establishing TLS connection to '$confluent_mgr' (try `osdeploy initialize -t` on the deployment server)'
if [ ! -z "$autoconsdev" ]; then echo 'Failure establishing TLS connection to '$confluent_mgr' (try `osdeploy initialize -t` on the deployment server)' > $autoconsdev; fi
sleep 10
else
ready=1
fi
rm $tmperr
done
if [ ! -z "$autocons" ] && grep "textconsole: true" /etc/confluent/confluent.deploycfg > /dev/null; then /opt/confluent/bin/autocons -c > /dev/null; fi
if [ -c /dev/tpmrm0 ]; then
tpm2_pcrextend 15:sha256=2fbe96c50dde38ce9cd2764ddb79c216cfbcd3499568b1125450e60c45dd19f2
fi
umask $oldumask
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
if [ "$autoconfigmethod" = "dhcp" ]; then
echo -n "Attempting to use dhcp to bring up $ifname..."
dhclient $ifname
echo "Complete:"
ip addr show dev $ifname
else
v4addr=$(grep ^ipv4_address: /etc/confluent/confluent.deploycfg)
v4addr=${v4addr#ipv4_address: }
v4gw=$(grep ^ipv4_gateway: /etc/confluent/confluent.deploycfg)
v4gw=${v4gw#ipv4_gateway: }
if [ "$v4gw" = "null" ]; then
v4gw=""
fi
v4nm=$(grep ^prefix: /etc/confluent/confluent.deploycfg)
v4nm=${v4nm#prefix: }
echo "Setting up $ifname as static at $v4addr/$v4nm"
ip addr add dev $ifname $v4addr/$v4nm
if [ ! -z "$v4gw" ]; then
ip route add default via $v4gw
fi
mkdir -p /run/NetworkManager/system-connections
cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
[connection]
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
type=ethernet
autoconnect-retries=1
EOC
echo interface-name=$ifname >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
multi-connect=1
permissions=
wait-device-timeout=60000
[ethernet]
mac-address-blacklist=
[ipv4]
EOC
echo address1=$v4addr/$v4nm >> /run/NetworkManager/system-connections/$ifname.nmconnection
if [ ! -z "$v4gw" ]; then
echo gateway=$v4gw >> /run/NetworkManager/system-connections/$ifname.nmconnection
fi
nameserversec=0
nameservers=""
while read -r entry; do
if [ $nameserversec = 1 ]; then
if [[ $entry == "-"* ]]; then
nameservers="$nameservers"${entry#- }";"
continue
fi
fi
nameserversec=0
if [ "${entry%:*}" = "nameservers" ]; then
nameserversec=1
continue
fi
done < /etc/confluent/confluent.deploycfg
echo dns=$nameservers >> /run/NetworkManager/system-connections/$ifname.nmconnection
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
dnsdomain=${dnsdomain#dnsdomain: }
echo dns-search=$dnsdomain >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
may-fail=false
method=manual
[ipv6]
addr-gen-mode=eui64
method=auto
[proxy]
EOC
fi
chmod 600 /run/NetworkManager/system-connections/*.nmconnection
echo -n "Initializing ssh..."
ssh-keygen -A
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey/.pub/-cert.pub}
privfile=${pubkey%.pub}
curl -sf -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -d @$pubkey https://$confluent_mgr/confluent-api/self/sshcert > $certfile
if [ -s $certfile ]; then
echo HostCertificate $certfile >> /etc/ssh/sshd_config
fi
echo HostKey $privfile >> /etc/ssh/sshd_config
done
/usr/sbin/sshd
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg| awk '{print $2}')
confluent_proto=$(grep ^protocol: /etc/confluent/confluent.deploycfg| awk '{print $2}')
confluent_urls=""
for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed -e s/%/%25/); do
if [[ $addr == *:* ]]; then
confluent_urls="$confluent_urls $confluent_proto://[$addr]/confluent-public/os/$confluent_profile/rootimg.sfs"
else
confluent_urls="$confluent_urls $confluent_proto://$addr/confluent-public/os/$confluent_profile/rootimg.sfs"
fi
done
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg| awk '{print $2}')
mkdir -p /etc/confluent
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
. /etc/confluent/functions
source_remote imageboot.sh

View File

@ -0,0 +1,47 @@
try:
import configparser
except ImportError:
import ConfigParser as configparser
import cStringIO
import importlib.util
import importlib.machinery
import sys
modloader = importlib.machinery.SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient')
modspec = importlib.util.spec_from_file_location('apiclient', '/opt/confluent/bin/apiclient', loader=modloader)
apiclient = importlib.util.module_from_spec(modspec)
modspec.loader.exec_module(apiclient)
repo = None
server = None
profile = None
with open('/etc/confluent/confluent.deploycfg') as dplcfgfile:
lines = dplcfgfile.read().split('\n')
for line in lines:
if line.startswith('deploy_server:'):
_, server = line.split(' ', 1)
if line.startswith('profile: '):
_, profile = line.split(' ', 1)
path = '/confluent-public/os/{0}/distribution/'.format(profile)
clnt = apiclient.HTTPSClient()
cfgdata = clnt.grab_url(path + '.treeinfo').decode()
c = configparser.ConfigParser()
try:
c.read_string(cfgdata)
except AttributeError:
f = cStringIO.StringIO(cfgdata)
c.readfp(f)
for sec in c.sections():
if sec.startswith('variant-'):
try:
repopath = c.get(sec, 'repository')
except Exception:
continue
_, varname = sec.split('-', 1)
reponame = '/etc/yum.repos.d/local-{0}.repo'.format(varname.lower())
with open(reponame, 'w') as repout:
repout.write('[local-{0}]\n'.format(varname.lower()))
repout.write('name=Local install repository for {0}\n'.format(varname))
if repopath[0] == '.':
repopath = repopath[1:]
repout.write('baseurl=https://{}/confluent-public/os/{}/distribution/{}\n'.format(server, profile, repopath))
repout.write('enabled=1\n')

View File

@ -0,0 +1,4 @@
. /etc/confluent/functions
# This is a convenient place to keep customizations separate from modifying the stock scripts
# While modification of the stock scripts is fine, it may be easier to rebase to a newer
# stock profile if the '.custom' files are used.

View File

@ -0,0 +1,11 @@
[Unit]
Description=First Boot Process
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/opt/confluent/bin/firstboot.sh
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,40 @@
#!/bin/sh
# This script is executed on the first boot after install has
# completed. It is best to edit the middle of the file as
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_mgr >& /dev/null; do
sleep 1
done
if [ ! -f /etc/confluent/firstboot.ran ]; then
touch /etc/confluent/firstboot.ran
cat /etc/confluent/tls/*.pem >> /etc/pki/tls/certs/ca-bundle.crt
run_remote firstboot.custom
# Firstboot scripts may be placed into firstboot.d, e.g. firstboot.d/01-firstaction.sh, firstboot.d/02-secondaction.sh
run_remote_parts firstboot.d
# Induce execution of remote configuration, e.g. ansible plays in ansible/firstboot.d/
run_remote_config firstboot.d
fi
curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid

View File

@ -0,0 +1,196 @@
#!/bin/bash
function test_mgr() {
if curl -s https://${1}/confluent-api/ > /dev/null; then
return 0
fi
return 1
}
function confluentpython() {
if [ -x /usr/libexec/platform-python ]; then
/usr/libexec/platform-python $*
elif [ -x /usr/bin/python3 ]; then
/usr/bin/python3 $*
elif [ -x /usr/bin/python ]; then
/usr/bin/python $*
elif [ -x /usr/bin/python2 ]; then
/usr/bin/python2 $*
fi
}
function set_confluent_vars() {
if [ -z "$nodename" ]; then
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
fi
if [[ "$confluent_mgr" == *"%"* ]]; then
confluent_mgr=""
fi
if [ -z "$confluent_mgr" ]; then
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
if ! test_mgr $confluent_mgr; then
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
if [[ "$confluent_mgr" = *":"* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
fi
if ! test_mgr $confluent_mgr; then
BESTMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|1$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
OKMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|0$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
for confluent_mgr in $BESTMGRS $OKMGRS; do
if [[ $confluent_mgr == *":"* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
if test_mgr $confluent_mgr; then
break
fi
done
fi
fi
if [ -z "$confluent_profile" ]; then
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
fi
}
fetch_remote() {
curlargs=""
if [ -f /etc/confluent/ca.pem ]; then
curlargs=" --cacert /etc/confluent/ca.pem"
fi
set_confluent_vars
mkdir -p $(dirname $1)
curl -f -sS $curlargs https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/$1 > $1
if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
}
source_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
fi
scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
for script in $scriptlist; do
source_remote $1/$script
done
rm -rf $confluentscripttmpdir
unset confluentscripttmpdir
}
run_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
fi
scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
for script in $scriptlist; do
run_remote $1/$script
done
rm -rf $confluentscripttmpdir
unset confluentscripttmpdir
}
source_remote() {
set_confluent_vars
unsettmpdir=0
echo
echo '---------------------------------------------------------------------------'
echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
echo Sourcing from $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
chmod +x $1
cmd=$1
shift
source ./$cmd
cd - > /dev/null
if [ "$unsettmpdir" = 1 ]; then
rm -rf $confluentscripttmpdir
unset confluentscripttmpdir
unsettmpdir=0
fi
rm -rf $confluentscripttmpdir
return $retcode
}
run_remote() {
requestedcmd="'$*'"
unsettmpdir=0
set_confluent_vars
echo
echo '---------------------------------------------------------------------------'
echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
if [ $? != 0 ]; then echo $requestedcmd failed to download; return 1; fi
chmod +x $1
cmd=$1
if [ -x /usr/bin/chcon ]; then
chcon system_u:object_r:bin_t:s0 $cmd
fi
shift
./$cmd $*
retcode=$?
if [ $retcode -ne 0 ]; then
echo "$requestedcmd exited with code $retcode"
fi
cd - > /dev/null
if [ "$unsettmpdir" = 1 ]; then
rm -rf $confluentscripttmpdir
unset confluentscripttmpdir
unsettmpdir=0
fi
return $retcode
}
run_remote_python() {
echo
set_confluent_vars
if [ -f /etc/confluent/ca.pem ]; then
curlargs=" --cacert /etc/confluent/ca.pem"
fi
echo '---------------------------------------------------------------------------'
echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
mkdir -p $(dirname $1)
curl -f -sS $curlargs https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/$1 > $1
if [ $? != 0 ]; then echo "'$*'" failed to download; return 1; fi
confluentpython $*
retcode=$?
echo "'$*' exited with code $retcode"
cd - > /dev/null
rm -rf $confluentscripttmpdir
unset confluentscripttmpdir
return $retcode
}
run_remote_config() {
echo
set_confluent_vars
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
fi
echo '---------------------------------------------------------------------------'
echo Requesting to run remote configuration for "'$*'" from $confluent_mgr under profile $confluent_profile
confluentpython $apiclient /confluent-api/self/remoteconfig/"$*" -d {}
confluentpython $apiclient /confluent-api/self/remoteconfig/status -w 204
echo
echo 'Completed remote configuration'
echo '---------------------------------------------------------------------------'
return
}
#If invoked as a command, use the arguments to actually run a function
(return 0 2>/dev/null) || $1 "${@:2}"
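The last line above makes the file dual-purpose: when sourced, the bare return succeeds and nothing more runs, but when executed directly the return fails outside of a function and the first argument is dispatched as a function name. For example (invocation illustrative): `bash /etc/confluent/functions run_remote onboot.custom`.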

View File

@ -0,0 +1,93 @@
import subprocess
import os
class DiskInfo(object):
def __init__(self, devname):
self.name = devname
self.wwn = None
self.path = None
self.model = ''
self.size = 0
self.driver = None
self.mdcontainer = ''
devnode = '/dev/{0}'.format(devname)
qprop = subprocess.check_output(
['udevadm', 'info', '--query=property', devnode])
if not isinstance(qprop, str):
qprop = qprop.decode('utf8')
for prop in qprop.split('\n'):
if '=' not in prop:
continue
k, v = prop.split('=', 1)
if k == 'DEVTYPE' and v != 'disk':
raise Exception('Not a disk')
elif k == 'DM_NAME':
raise Exception('Device Mapper')
elif k == 'ID_MODEL':
self.model = v
elif k == 'DEVPATH':
self.path = v
elif k == 'ID_WWN':
self.wwn = v
elif k == 'MD_CONTAINER':
self.mdcontainer = v
attrs = subprocess.check_output(['udevadm', 'info', '-a', devnode])
if not isinstance(attrs, str):
attrs = attrs.decode('utf8')
for attr in attrs.split('\n'):
if '==' not in attr:
continue
k, v = attr.split('==', 1)
k = k.strip()
if k == 'ATTRS{size}':
self.size = v.replace('"', '')
elif (k == 'DRIVERS' and not self.driver
and v not in ('"sd"', '""')):
self.driver = v.replace('"', '')
if not self.driver and 'imsm' not in self.mdcontainer:
raise Exception("No driver detected")
if os.path.exists('/sys/block/{0}/size'.format(self.name)):
with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
self.size = int(sizesrc.read()) * 512
if int(self.size) < 536870912:
raise Exception("Device too small for install")
@property
def priority(self):
if self.model.lower() in ('thinksystem_m.2_vd', 'thinksystem m.2', 'thinksystem_m.2'):
return 0
if 'imsm' in self.mdcontainer:
return 1
if self.driver == 'ahci':
return 2
if self.driver.startswith('megaraid'):
return 3
if self.driver.startswith('mpt'):
return 4
return 99
def __repr__(self):
return repr({
'name': self.name,
'path': self.path,
'wwn': self.wwn,
'driver': self.driver,
'size': self.size,
'model': self.model,
})
def main():
disks = []
for disk in sorted(os.listdir('/sys/class/block')):
try:
disk = DiskInfo(disk)
disks.append(disk)
except Exception as e:
print("Skipping {0}: {1}".format(disk, str(e)))
nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
if nd:
open('/tmp/installdisk', 'w').write(nd[0])
if __name__ == '__main__':
main()

View File

@ -0,0 +1,376 @@
#!/usr/bin/python3
import glob
import json
import os
import re
import time
import shutil
import socket
import stat
import struct
import sys
import subprocess
def get_next_part_meta(img, imgsize):
if img.tell() == imgsize:
return None
pathlen = struct.unpack('!H', img.read(2))[0]
mountpoint = img.read(pathlen).decode('utf8')
jsonlen = struct.unpack('!I', img.read(4))[0]
metadata = json.loads(img.read(jsonlen).decode('utf8'))
img.seek(16, 1) # skip the two 64-bit values we don't use, they are in json
nextlen = struct.unpack('!H', img.read(2))[0]
img.seek(nextlen, 1) # skip filesystem type
nextlen = struct.unpack('!H', img.read(2))[0]
img.seek(nextlen, 1) # skip orig devname (redundant with json)
nextlen = struct.unpack('!H', img.read(2))[0]
img.seek(nextlen, 1) # skip padding
nextlen = struct.unpack('!Q', img.read(8))[0]
img.seek(nextlen, 1) # go to next section
return metadata
def get_multipart_image_meta(img):
img.seek(0, 2)
imgsize = img.tell()
img.seek(16)
seekamt = img.read(1)
img.seek(struct.unpack('B', seekamt)[0], 1)
partinfo = get_next_part_meta(img, imgsize)
while partinfo:
yield partinfo
partinfo = get_next_part_meta(img, imgsize)
def get_image_metadata(imgpath):
with open(imgpath, 'rb') as img:
header = img.read(16)
if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1':
for md in get_multipart_image_meta(img):
yield md
else:
raise Exception('Installation from single part image not supported')
class PartedRunner():
def __init__(self, disk):
self.disk = disk
def run(self, command):
command = command.split()
command = ['parted', '-a', 'optimal', '-s', self.disk] + command
return subprocess.check_output(command).decode('utf8')
def fixup(rootdir, vols):
devbymount = {}
for vol in vols:
devbymount[vol['mount']] = vol['targetdisk']
fstabfile = os.path.join(rootdir, 'etc/fstab')
with open(fstabfile) as tfile:
fstab = tfile.read().split('\n')
while not fstab[0]:
fstab = fstab[1:]
if os.path.exists(os.path.join(rootdir, '.autorelabel')):
os.unlink(os.path.join(rootdir, '.autorelabel'))
with open(fstabfile, 'w') as tfile:
for tab in fstab:
entry = tab.split()
if tab.startswith('#ORIGFSTAB#'):
if entry[1] in devbymount:
targetdev = devbymount[entry[1]]
if targetdev.startswith('/dev/localstorage/'):
entry[0] = targetdev
else:
uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8')
uuid = uuid.strip()
entry[0] = 'UUID={}'.format(uuid)
elif entry[2] == 'swap':
entry[0] = '/dev/mapper/localstorage-swap'
entry[0] = entry[0].ljust(42)
entry[1] = entry[1].ljust(16)
entry[3] = entry[3].ljust(28)
tab = '\t'.join(entry)
tfile.write(tab + '\n')
with open(os.path.join(rootdir, 'etc/hostname'), 'w') as nameout:
nameout.write(socket.gethostname() + '\n')
selinuxconfig = os.path.join(rootdir, 'etc/selinux/config')
policy = None
if os.path.exists(selinuxconfig):
with open(selinuxconfig) as cfgin:
sec = cfgin.read().split('\n')
for l in sec:
l = l.split('#', 1)[0]
if l.startswith('SELINUXTYPE='):
_, policy = l.split('=')
for sshkey in glob.glob(os.path.join(rootdir, 'etc/ssh/*_key*')):
os.unlink(sshkey)
for sshkey in glob.glob('/etc/ssh/*_key*'):
newkey = os.path.join(rootdir, sshkey[1:])
shutil.copy2(sshkey, newkey)
finfo = os.stat(sshkey)
os.chown(newkey, finfo[stat.ST_UID], finfo[stat.ST_GID])
for ifcfg in glob.glob(os.path.join(rootdir, 'etc/sysconfig/network-scripts/*')):
os.unlink(ifcfg)
for ifcfg in glob.glob(os.path.join(rootdir, 'etc/NetworkManager/system-connections/*')):
os.unlink(ifcfg)
for ifcfg in glob.glob('/run/NetworkManager/system-connections/*'):
newcfg = ifcfg.split('/')[-1]
newcfg = os.path.join(rootdir, 'etc/NetworkManager/system-connections/{0}'.format(newcfg))
shutil.copy2(ifcfg, newcfg)
shutil.rmtree(os.path.join(rootdir, 'etc/confluent/'))
shutil.copytree('/etc/confluent', os.path.join(rootdir, 'etc/confluent'))
if policy:
sys.stdout.write('Applying SELinux labeling...')
sys.stdout.flush()
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'etc')])
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'opt')])
sys.stdout.write('Done\n')
sys.stdout.flush()
for metafs in ('proc', 'sys', 'dev'):
subprocess.check_call(['mount', '-o', 'bind', '/{}'.format(metafs), os.path.join(rootdir, metafs)])
with open(os.path.join(rootdir, 'etc/sysconfig/grub')) as defgrubin:
defgrub = defgrubin.read().split('\n')
with open(os.path.join(rootdir, 'etc/sysconfig/grub'), 'w') as defgrubout:
for gline in defgrub:
gline = gline.split()
newline = []
for ent in gline:
if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'):
continue
newline.append(ent)
defgrubout.write(' '.join(newline) + '\n')
grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/')
subprocess.check_call(['chroot', rootdir, 'grub2-mkconfig', '-o', grubcfg])
newroot = None
with open('/etc/shadow') as shadowin:
shents = shadowin.read().split('\n')
for shent in shents:
shent = shent.split(':')
if not shent:
continue
if shent[0] == 'root' and shent[1] not in ('*', '!!', ''):
newroot = shent[1]
if newroot:
shlines = None
with open(os.path.join(rootdir, 'etc/shadow')) as oshadow:
shlines = oshadow.read().split('\n')
with open(os.path.join(rootdir, 'etc/shadow'), 'w') as oshadow:
for line in shlines:
if line.startswith('root:'):
line = line.split(':')
line[1] = newroot
line = ':'.join(line)
oshadow.write(line + '\n')
partnum = None
targblock = None
for vol in vols:
if vol['mount'] == '/boot/efi':
targdev = vol['targetdisk']
partnum = re.search('(\d+)$', targdev).group(1)
targblock = re.search('(.*)\d+$', targdev).group(1)
if targblock:
shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
#other network interfaces
def had_swap():
with open('/etc/fstab') as tabfile:
tabs = tabfile.read().split('\n')
for tab in tabs:
tab = tab.split()
if len(tab) < 3:
continue
if tab[2] == 'swap':
return True
return False
def install_to_disk(imgpath):
lvmvols = {}
deftotsize = 0
mintotsize = 0
deflvmsize = 0
minlvmsize = 0
biggestsize = 0
biggestfs = None
plainvols = {}
allvols = []
swapsize = 0
if had_swap():
with open('/proc/meminfo') as meminfo:
swapsize = meminfo.read().split('\n')[0]
swapsize = int(swapsize.split()[1])
if swapsize < 2097152:
swapsize = swapsize * 2
elif swapsize > 8388608 and swapsize < 67108864:
swapsize = swapsize * 0.5
elif swapsize >= 67108864:
swapsize = 33554432
swapsize = int(swapsize * 1024)
deftotsize = swapsize
mintotsize = swapsize
for fs in get_image_metadata(imgpath):
allvols.append(fs)
deftotsize += fs['initsize']
mintotsize += fs['minsize']
if fs['initsize'] > biggestsize:
biggestfs = fs
biggestsize = fs['initsize']
if fs['device'].startswith('/dev/mapper'):
lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs
deflvmsize += fs['initsize']
minlvmsize += fs['minsize']
else:
plainvols[int(re.search('(\d+)$', fs['device'])[0])] = fs
with open('/tmp/installdisk') as diskin:
instdisk = diskin.read()
instdisk = '/dev/' + instdisk
parted = PartedRunner(instdisk)
dinfo = parted.run('unit s print')
dinfo = dinfo.split('\n')
sectors = 0
sectorsize = 0
for inf in dinfo:
if inf.startswith('Disk {0}:'.format(instdisk)):
_, sectors = inf.split(': ')
sectors = int(sectors.replace('s', ''))
if inf.startswith('Sector size (logical/physical):'):
_, sectorsize = inf.split(':')
sectorsize = sectorsize.split('/')[0]
sectorsize = sectorsize.replace('B', '')
sectorsize = int(sectorsize)
# for now, only support resizing/growing the largest partition
minexcsize = deftotsize - biggestfs['initsize']
mintotsize = deftotsize - biggestfs['initsize'] + biggestfs['minsize']
minsectors = mintotsize // sectorsize
if sectors < (minsectors + 65536):
raise Exception('Disk too small to fit image')
biggestsectors = sectors - (minexcsize // sectorsize)
biggestsize = sectorsize * biggestsectors
parted.run('mklabel gpt')
curroffset = 2048
for volidx in sorted(plainvols):
vol = plainvols[volidx]
if vol is not biggestfs:
size = vol['initsize'] // sectorsize
else:
size = biggestsize // sectorsize
size += 2047 - (size % 2048)
end = curroffset + size
if end > sectors:
end = sectors
parted.run('mkpart primary {}s {}s'.format(curroffset, end))
vol['targetdisk'] = instdisk + '{0}'.format(volidx)
curroffset += size + 1
if not lvmvols:
if swapsize:
swapsize = swapsize // sectorsize
swapsize += 2047 - (size % 2048)
end = curroffset + swapsize
if end > sectors:
end = sectors
parted.run('mkpart swap {}s {}s'.format(curroffset, end))
subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
else:
parted.run('mkpart lvm {}s 100%'.format(curroffset))
lvmpart = instdisk + '{}'.format(volidx + 1)
subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
vginfo = vginfo.split('\n')
pesize = 0
pes = 0
for infline in vginfo:
infline = infline.split()
if len(infline) >= 3 and infline[:2] == ['PE', 'Size']:
pesize = int(infline[2])
if len(infline) >= 5 and infline[:2] == ['Free', 'PE']:
pes = int(infline[4])
takeaway = swapsize // pesize
for volidx in lvmvols:
vol = lvmvols[volidx]
if vol is biggestfs:
continue
takeaway += vol['initsize'] // pesize
takeaway += 1
biggestextents = pes - takeaway
for volidx in lvmvols:
vol = lvmvols[volidx]
if vol is biggestfs:
extents = biggestextents
else:
extents = vol['initsize'] // pesize
extents += 1
if vol['mount'] == '/':
lvname = 'root'
else:
lvname = vol['mount'].replace('/', '_')
subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage'])
vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname)
if swapsize:
subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage'])
subprocess.check_call(['mkswap', '/dev/localstorage/swap'])
os.makedirs('/run/imginst/targ')
for vol in allvols:
with open(vol['targetdisk'], 'wb') as partition:
partition.write(b'\x00' * 1 * 1024 * 1024)
subprocess.check_call(['mkfs.{}'.format(vol['filesystem']), vol['targetdisk']])
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ'])
source = vol['mount'].replace('/', '_')
source = '/run/imginst/sources/' + source
blankfsstat = os.statvfs('/run/imginst/targ')
blankused = (blankfsstat.f_blocks - blankfsstat.f_bfree) * blankfsstat.f_bsize
sys.stdout.write('\nWriting {0}: '.format(vol['mount']))
with subprocess.Popen(['cp', '-ax', source + '/.', '/run/imginst/targ']) as copier:
stillrunning = copier.poll()
lastprogress = 0.0
while stillrunning is None:
currfsstat = os.statvfs('/run/imginst/targ')
currused = (currfsstat.f_blocks - currfsstat.f_bfree) * currfsstat.f_bsize
currused -= blankused
with open('/proc/meminfo') as meminf:
for line in meminf.read().split('\n'):
if line.startswith('Dirty:'):
_, dirty, _ = line.split()
dirty = int(dirty) * 1024
progress = (currused - dirty) / vol['minsize']
if progress < lastprogress:
progress = lastprogress
if progress > 0.99:
progress = 0.99
lastprogress = progress
progress = progress * 100
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
sys.stdout.flush()
time.sleep(0.5)
stillrunning = copier.poll()
if stillrunning != 0:
raise Exception("Error copying volume")
with subprocess.Popen(['sync']) as syncrun:
stillrunning = syncrun.poll()
while stillrunning is None:
with open('/proc/meminfo') as meminf:
for line in meminf.read().split('\n'):
if line.startswith('Dirty:'):
_, dirty, _ = line.split()
dirty = int(dirty) * 1024
progress = (vol['minsize'] - dirty) / vol['minsize']
if progress < lastprogress:
progress = lastprogress
if progress > 0.99:
progress = 0.99
lastprogress = progress
progress = progress * 100
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
sys.stdout.flush()
time.sleep(0.5)
stillrunning = syncrun.poll()
sys.stdout.write('\x1b[1K\rDone writing {0}'.format(vol['mount']))
sys.stdout.write('\n')
sys.stdout.flush()
subprocess.check_call(['umount', '/run/imginst/targ'])
for vol in allvols:
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ/' + vol['mount']])
fixup('/run/imginst/targ', allvols)
if __name__ == '__main__':
install_to_disk(os.environ['mountsrc'])

View File

@ -0,0 +1,127 @@
. /lib/dracut-lib.sh
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay
if [ "untethered" = "$(getarg confluent_imagemethod)" ]; then
mount -t tmpfs untethered /mnt/remoteimg
curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs
else
confluent_urls="$confluent_urls https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs"
/opt/confluent/bin/urlmount $confluent_urls /mnt/remoteimg
fi
/opt/confluent/bin/confluent_imginfo /mnt/remoteimg/rootimg.sfs > /tmp/rootimg.info
loopdev=$(losetup -f)
export mountsrc=$loopdev
losetup -r $loopdev /mnt/remoteimg/rootimg.sfs
if grep '^Format: confluent_crypted' /tmp/rootimg.info > /dev/null; then
while ! curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/profileprivate/pending/rootimg.key > /tmp/rootimg.key; do
echo "Unable to retrieve private key from $confluent_mgr (verify that confluent can access /var/lib/confluent/private/$confluent_profile/pending/rootimg.key)"
sleep 1
done
cipher=$(head -n 1 /tmp/rootimg.key)
key=$(tail -n 1 /tmp/rootimg.key)
len=$(wc -c /mnt/remoteimg/rootimg.sfs | awk '{print $1}')
len=$(((len-4096)/512))
dmsetup create cryptimg --table "0 $len crypt $cipher $key 0 $loopdev 8"
/opt/confluent/bin/confluent_imginfo /dev/mapper/cryptimg > /tmp/rootimg.info
mountsrc=/dev/mapper/cryptimg
fi
if grep '^Format: squashfs' /tmp/rootimg.info > /dev/null; then
mount -o ro $mountsrc /mnt/remote
elif grep '^Format: confluent_multisquash' /tmp/rootimg.info; then
tail -n +3 /tmp/rootimg.info | awk '{gsub("/", "_"); print "echo 0 " $4 " linear '$mountsrc' " $3 " | dmsetup create mproot" $7}' > /tmp/setupmount.sh
. /tmp/setupmount.sh
cat /tmp/setupmount.sh |awk '{printf "mount /dev/mapper/"$NF" "; sub("mproot", ""); gsub("_", "/"); print "/mnt/remote"$NF}' > /tmp/mountparts.sh
. /tmp/mountparts.sh
fi
#mount -t tmpfs overlay /mnt/overlay
modprobe zram
memtot=$(grep ^MemTotal: /proc/meminfo|awk '{print $2}')
memtot=$((memtot/2))$(grep ^MemTotal: /proc/meminfo | awk '{print $3'})
echo $memtot > /sys/block/zram0/disksize
mkfs.xfs /dev/zram0 > /dev/null
mount -o discard /dev/zram0 /mnt/overlay
if [ ! -f /tmp/mountparts.sh ]; then
mkdir -p /mnt/overlay/upper /mnt/overlay/work
mount -t overlay -o upperdir=/mnt/overlay/upper,workdir=/mnt/overlay/work,lowerdir=/mnt/remote disklessroot /sysroot
else
for srcmount in $(cat /tmp/mountparts.sh | awk '{print $3}'); do
mkdir -p /mnt/overlay${srcmount}/upper /mnt/overlay${srcmount}/work
mount -t overlay -o upperdir=/mnt/overlay${srcmount}/upper,workdir=/mnt/overlay${srcmount}/work,lowerdir=${srcmount} disklesspart /sysroot${srcmount#/mnt/remote}
done
fi
mkdir -p /sysroot/etc/ssh
mkdir -p /sysroot/etc/confluent
mkdir -p /sysroot/root/.ssh
cp /root/.ssh/* /sysroot/root/.ssh
chmod 700 /sysroot/root/.ssh
cp /etc/confluent/* /sysroot/etc/confluent/
cp /etc/ssh/*key* /sysroot/etc/ssh/
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey/.pub/-cert.pub}
privfile=${pubkey%.pub}
if [ -s $certfile ]; then
echo HostCertificate $certfile >> /sysroot/etc/ssh/sshd_config
fi
echo HostKey $privfile >> /sysroot/etc/ssh/sshd_config
done
mkdir -p /sysroot/dev /sysroot/sys /sysroot/proc /sysroot/run
if [ ! -z "$autocons" ]; then
autocons=${autocons%,*}
mkdir -p /run/systemd/generator/getty.target.wants
ln -s /usr/lib/systemd/system/serial-getty@.service /run/systemd/generator/getty.target.wants/serial-getty@${autocons}.service
fi
while [ ! -e /sysroot/sbin/init ]; do
echo "Failed to access root filesystem or it is missing /sbin/init"
echo "System should be accessible through ssh at port 2222 with the appropriate key"
while [ ! -e /sysroot/sbin/init ]; do
sleep 1
done
done
rootpassword=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg)
rootpassword=${rootpassword#rootpassword: }
if [ "$rootpassword" = "null" ]; then
rootpassword=""
fi
if [ ! -z "$rootpassword" ]; then
sed -i "s@root:[^:]*:@root:$rootpassword:@" /sysroot/etc/shadow
fi
for i in /ssh/*.ca; do
echo '@cert-authority *' $(cat $i) >> /sysroot/etc/ssh/ssh_known_hosts
done
echo HostbasedAuthentication yes >> /sysroot/etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /sysroot/etc/ssh/sshd_config
echo IgnoreRhosts no >> /sysroot/etc/ssh/sshd_config
sshconf=/sysroot/etc/ssh/ssh_config
if [ -d /sysroot/etc/ssh/ssh_config.d/ ]; then
sshconf=/sysroot/etc/ssh/ssh_config.d/01-confluent.conf
fi
echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf
curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/nodelist > /sysroot/etc/ssh/shosts.equiv
cp /sysroot/etc/ssh/shosts.equiv /sysroot/root/.shosts
chmod 640 /sysroot/etc/ssh/*_key
chroot /sysroot chgrp ssh_keys /etc/ssh/*_key
cp /tls/*.pem /sysroot/etc/pki/ca-trust/source/anchors/
chroot /sysroot/ update-ca-trust
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/onboot.service > /sysroot/etc/systemd/system/onboot.service
mkdir -p /sysroot/opt/confluent/bin
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/onboot.sh > /sysroot/opt/confluent/bin/onboot.sh
chmod +x /sysroot/opt/confluent/bin/onboot.sh
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
cp /etc/confluent/functions /sysroot/etc/confluent/functions
if grep installtodisk /proc/cmdline > /dev/null; then
. /etc/confluent/functions
run_remote installimage
exec reboot -f
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
exec /opt/confluent/bin/start_root

View File

@ -0,0 +1,43 @@
#!/bin/bash
. /etc/confluent/functions
# the image will be used to deploy itself
# provide both access to image (for parsing metadata)
# and existing mounts of image (to take advantage of caching)
mount -o bind /sys /sysroot/sys
mount -o bind /dev /sysroot/dev
mount -o bind /proc /sysroot/proc
mount -o bind /run /sysroot/run
if [ ! -f /tmp/mountparts.sh ]; then
mkdir -p /sysroot/run/imginst/sources/_
mount -o bind /mnt/remote /sysroot/run/imginst/sources/_
else
for srcmount in $(cat /tmp/mountparts.sh | awk '{print $2}'); do
srcname=${srcmount#/dev/mapper/mproot}
srcdir=$(echo $srcmount | sed -e 's!/dev/mapper/mproot!/mnt/remote!' -e 's!_!/!g')
mkdir -p /sysroot/run/imginst/sources/$srcname
mount -o bind $srcdir /sysroot/run/imginst/sources/$srcname
done
fi
cd /sysroot/run
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_python getinstalldisk"
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_parts pre.d"
if [ ! -f /sysroot/tmp/installdisk ]; then
echo 'Unable to find a suitable installation target device, ssh to port 2222 to investigate'
while [ ! -f /sysroot/tmp/installdisk ]; do
sleep 1
done
fi
lvm vgchange -a n
udevadm control -e
chroot /sysroot /usr/lib/systemd/systemd-udevd --daemon
chroot /sysroot bash -c "source /etc/confluent/functions; run_remote_python image2disk.py"
echo "Port 22" >> /etc/ssh/sshd_config
echo 'Match LocalPort 22' >> /etc/ssh/sshd_config
echo ' ChrootDirectory /sysroot/run/imginst/targ' >> /etc/ssh/sshd_config
kill -HUP $(cat /run/sshd.pid)
chroot /sysroot/run/imginst/targ bash -c "source /etc/confluent/functions; run_remote post.sh"
chroot /sysroot bash -c "umount \$(tac /proc/mounts|awk '{print \$2}'|grep ^/run/imginst/targ)"

View File

@ -0,0 +1,11 @@
[Unit]
Description=Confluent onboot hook
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/opt/confluent/bin/onboot.sh
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,36 @@
#!/bin/sh
# This script is executed on each boot as it is
# completed. It is best to edit the middle of the file as
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-onboot.log
exec 2>> /var/log/confluent/confluent-onboot.log
chmod 600 /var/log/confluent/confluent-onboot.log
tail -f /var/log/confluent/confluent-onboot.log > /dev/console &
logshowpid=$!
rpm --import /etc/pki/rpm-gpg/*
run_remote_python add_local_repositories
run_remote_python syncfileclient
run_remote_python confignet
run_remote onboot.custom
# onboot scripts may be placed into onboot.d, e.g. onboot.d/01-firstaction.sh, onboot.d/02-secondaction.sh
run_remote_parts onboot.d
# Induce execution of remote configuration, e.g. ansible plays in ansible/onboot.d/
run_remote_config onboot.d
#curl -X POST -d 'status: booted' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
kill $logshowpid

View File

@ -0,0 +1,39 @@
#!/bin/sh
# This script is executed 'chrooted' into a cloned disk target before rebooting
#
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-post.log
exec 2>> /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
tail -f /var/log/confluent/confluent-post.log > /dev/console &
logshowpid=$!
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
mkdir -p /opt/confluent/bin
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
systemctl enable firstboot
selinuxpolicy=$(grep ^SELINUXTYPE /etc/selinux/config |awk -F= '{print $2}')
if [ ! -z "$selinuxpolicy" ]; then
setfiles /etc/selinux/${selinuxpolicy}/contexts/files/file_contexts /etc/
fi
run_remote_python syncfileclient
run_remote post.custom
# post scripts may be placed into post.d, e.g. post.d/01-firstaction.sh, post.d/02-secondaction.sh
run_remote_parts post.d
# Induce execution of remote configuration, e.g. ansible plays in ansible/post.d/
run_remote_config post.d
curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
kill $logshowpid

View File

@ -0,0 +1,272 @@
#!/usr/bin/python
import importlib
import tempfile
import json
import os
import shutil
import pwd
import grp
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
except FileNotFoundError:
apiclient = SourceFileLoader('apiclient', '/etc/confluent/apiclient').load_module()
def partitionhostsline(line):
comment = ''
try:
cmdidx = line.index('#')
comment = line[cmdidx:]
line = line[:cmdidx].strip()
except ValueError:
pass
if not line:
return '', [], comment
ipaddr, names = line.split(maxsplit=1)
names = names.split()
return ipaddr, names, comment
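# Merge hosts entries from a deployment-server supplied file into the local /etc/hosts:
# target lines whose IP or any hostname matches a source entry are replaced in place by the
# source line, and any remaining unmatched source lines are appended at the end.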
class HostMerger(object):
def __init__(self):
self.byip = {}
self.byname = {}
self.sourcelines = []
self.targlines = []
def read_source(self, sourcefile):
with open(sourcefile, 'r') as hfile:
self.sourcelines = hfile.read().split('\n')
while not self.sourcelines[-1]:
self.sourcelines = self.sourcelines[:-1]
for x in range(len(self.sourcelines)):
line = self.sourcelines[x]
currip, names, comment = partitionhostsline(line)
if currip:
self.byip[currip] = x
for name in names:
self.byname[name] = x
def read_target(self, targetfile):
with open(targetfile, 'r') as hfile:
lines = hfile.read().split('\n')
if not lines[-1]:
lines = lines[:-1]
for y in range(len(lines)):
line = lines[y]
currip, names, comment = partitionhostsline(line)
if currip in self.byip:
x = self.byip[currip]
if self.sourcelines[x] is None:
# have already consumed this entry
continue
self.targlines.append(self.sourcelines[x])
self.sourcelines[x] = None
continue
for name in names:
if name in self.byname:
x = self.byname[name]
if self.sourcelines[x] is None:
break
self.targlines.append(self.sourcelines[x])
self.sourcelines[x] = None
break
else:
self.targlines.append(line)
def write_out(self, targetfile):
while not self.targlines[-1]:
self.targlines = self.targlines[:-1]
if not self.targlines:
break
while not self.sourcelines[-1]:
self.sourcelines = self.sourcelines[:-1]
if not self.sourcelines:
break
with open(targetfile, 'w') as hosts:
for line in self.targlines:
hosts.write(line + '\n')
for line in self.sourcelines:
if line is not None:
hosts.write(line + '\n')
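# Merge passwd/group entries from the deployment server into the local files: only ids within
# the UID_MIN/UID_MAX (or GID_MIN/GID_MAX) range from /etc/login.defs are taken from the source,
# local entries outside that range are preserved, and matching /etc/shadow or /etc/gshadow
# entries are created with the password field disabled ('!') when not already present.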
class CredMerger:
def __init__(self):
try:
with open('/etc/login.defs', 'r') as ldefs:
defs = ldefs.read().split('\n')
except FileNotFoundError:
defs = []
lkup = {}
self.discardnames = {}
self.shadowednames = {}
for line in defs:
try:
line = line[:line.index('#')]
except ValueError:
pass
keyval = line.split()
if len(keyval) < 2:
continue
lkup[keyval[0]] = keyval[1]
self.uidmin = int(lkup.get('UID_MIN', 1000))
self.uidmax = int(lkup.get('UID_MAX', 60000))
self.gidmin = int(lkup.get('GID_MIN', 1000))
self.gidmax = int(lkup.get('GID_MAX', 60000))
self.shadowlines = None
def read_passwd(self, source, targfile=False):
self.read_generic(source, self.uidmin, self.uidmax, targfile)
def read_group(self, source, targfile=False):
self.read_generic(source, self.gidmin, self.gidmax, targfile)
def read_generic(self, source, minid, maxid, targfile):
if targfile:
self.targdata = []
else:
self.sourcedata = []
with open(source, 'r') as inputfile:
for line in inputfile.read().split('\n'):
try:
name, _, uid, _ = line.split(':', 3)
uid = int(uid)
except ValueError:
continue
if targfile:
if uid < minid or uid > maxid:
self.targdata.append(line)
else:
self.discardnames[name] = 1
else:
if name[0] in ('+', '#', '@'):
self.sourcedata.append(line)
elif uid >= minid and uid <= maxid:
self.sourcedata.append(line)
def read_shadow(self, source):
self.shadowlines = []
try:
with open(source, 'r') as inshadow:
for line in inshadow.read().split('\n'):
try:
name, _ = line.split(':' , 1)
except ValueError:
continue
if name in self.discardnames:
continue
self.shadowednames[name] = 1
self.shadowlines.append(line)
except FileNotFoundError:
return
def write_out(self, outfile):
with open(outfile, 'w') as targ:
for line in self.targdata:
targ.write(line + '\n')
for line in self.sourcedata:
targ.write(line + '\n')
if outfile == '/etc/passwd':
if self.shadowlines is None:
self.read_shadow('/etc/shadow')
with open('/etc/shadow', 'w') as shadout:
for line in self.shadowlines:
shadout.write(line + '\n')
for line in self.sourcedata:
name, _ = line.split(':', 1)
if name[0] in ('+', '#', '@'):
continue
if name in self.shadowednames:
continue
shadout.write(name + ':!:::::::\n')
if outfile == '/etc/group':
if self.shadowlines is None:
self.read_shadow('/etc/gshadow')
with open('/etc/gshadow', 'w') as shadout:
for line in self.shadowlines:
shadout.write(line + '\n')
for line in self.sourcedata:
name, _ = line.split(':' , 1)
if name in self.shadowednames:
continue
shadout.write(name + ':!::\n')
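# Append the staged file's contents to the corresponding local path (the staged path with the
# staging directory prefix stripped), but only when the target does not already contain that data.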
def appendonce(basepath, filename):
with open(filename, 'rb') as filehdl:
thedata = filehdl.read()
targname = filename.replace(basepath, '')
try:
with open(targname, 'rb') as filehdl:
targdata = filehdl.read()
except IOError:
targdata = b''
if thedata in targdata:
return
with open(targname, 'ab') as targhdl:
targhdl.write(thedata)
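# Ask the deployment server to stage syncfiles into temporary 'merge' and 'appendonce'
# directories, poll until it reports completion (204), then merge passwd/group/hosts, apply the
# append-once files, and finally apply any owner/group/permission options from the response.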
def synchronize():
tmpdir = tempfile.mkdtemp()
appendoncedir = tempfile.mkdtemp()
try:
ac = apiclient.HTTPSClient()
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status == 202:
lastrsp = ''
while status != 204:
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
if status == 200:
lastrsp = rsp
pendpasswd = os.path.join(tmpdir, 'etc/passwd')
if os.path.exists(pendpasswd):
cm = CredMerger()
cm.read_passwd(pendpasswd, targfile=False)
cm.read_passwd('/etc/passwd', targfile=True)
cm.write_out('/etc/passwd')
pendgroup = os.path.join(tmpdir, 'etc/group')
if os.path.exists(pendgroup):
cm = CredMerger()
cm.read_group(pendgroup, targfile=False)
cm.read_group('/etc/group', targfile=True)
cm.write_out('/etc/group')
pendhosts = os.path.join(tmpdir, 'etc/hosts')
if os.path.exists(pendhosts):
cm = HostMerger()
cm.read_source(pendhosts)
cm.read_target('/etc/hosts')
cm.write_out('/etc/hosts')
for dirn in os.walk(appendoncedir):
for filen in dirn[2]:
appendonce(appendoncedir, os.path.join(dirn[0], filen))
if lastrsp:
lastrsp = json.loads(lastrsp)
opts = lastrsp.get('options', {})
for fname in opts:
uid = -1
gid = -1
for opt in opts[fname]:
if opt == 'owner':
try:
uid = pwd.getpwnam(opts[fname][opt]['name']).pw_uid
except KeyError:
uid = opts[fname][opt]['id']
elif opt == 'group':
try:
gid = grp.getgrnam(opts[fname][opt]['name']).gr_gid
except KeyError:
gid = opts[fname][opt]['id']
elif opt == 'permissions':
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()

View File

@ -0,0 +1,29 @@
# It is advised to avoid /var/lib/confluent/public as a source for syncing. /var/lib/confluent/public
# is served without authentication and thus any sensitive content would be a risk. If wanting to host
# syncfiles on a common share, it is suggested to have /var/lib/confluent be the share and use a
# subdirectory other than public.
#
# Syncing is performed as the 'confluent' user, so all source files must be accessible by the confluent user.
#
# This file lists files to synchronize or merge to the deployed systems from the deployment server
# To specify taking /some/path/hosts on the deployment server and duplicating it to /etc/hosts:
# Note particularly the use of '->' to delineate source from target.
# /some/path/hosts -> /etc/hosts
# If wanting to simply use the same path for source and destination, the -> may be skipped:
# /etc/hosts
# More functionality is available, for example to limit an entry to apply only to n1 through n8, and to set
# owner, group, and permissions in octal notation:
# /example/source -> n1-n8:/etc/target (owner=root,group=root,permissions=600)
# Entries under APPENDONCE: will be appended to the specified target, but only if the target does not
# already contain the source data in its entirety. This allows appending in a fashion that
# is friendly to being run repeatedly
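# For example (illustrative paths only), to append an ssh key file without duplicating it on re-runs:
# APPENDONCE:
# /example/authorized_keys -> /root/.ssh/authorized_keys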
# Entries under MERGE: will attempt to be intelligently merged. This supports /etc/group and /etc/passwd.
# Any supporting entries in /etc/shadow or /etc/gshadow are added automatically, with the password disabled.
# It also will not inject 'system' ids (usually under 1,000), as those tend to be local and rpm managed.
MERGE:
# /etc/passwd
# /etc/group

View File

@ -119,6 +119,9 @@ proto=$(grep ^protocol: /etc/confluent/confluent.deploycfg)
proto=${proto#protocol: }
append=$(grep ^installedargs: /tmp/profile.yaml | sed -e 's/^installedargs: //' -e 's/#.*//')
if grep console= /etc/fakecmdline >& /dev/null && [[ "$append" != *console=* ]]; then
append="$append console=${autocons#*/dev/}"
fi
if [ -z "$append" ]; then
echo "<bootloader/>" > /tmp/bootloader.xml
else

View File

@ -69,7 +69,11 @@ if args[0] == 'restore':
if options.interactivepassword:
password = getpass.getpass('Enter password to restore backup: ')
try:
cfm.init(True)
cfm.statelessmode = True
cfm.restore_db_from_directory(dumpdir, password)
cfm.statelessmode = False
cfm.ConfigManager.wait_for_sync(True)
if owner != 0:
for targdir in os.walk('/etc/confluent'):
os.chown(targdir[0], owner, group)

View File

@ -22,7 +22,10 @@
import confluent.config.configmanager as configmanager
import eventlet
import eventlet.tpool
import Cryptodome.Protocol.KDF as KDF
try:
import Cryptodome.Protocol.KDF as KDF
except ImportError:
import Crypto.Protocol.KDF as KDF
from fnmatch import fnmatch
import hashlib
import hmac

View File

@ -7,6 +7,13 @@ import socket
import eventlet.green.subprocess as subprocess
import tempfile
def mkdirp(targ):
try:
return os.makedirs(targ)
except OSError as e:
if e.errno != 17:
raise
def get_openssl_conf_location():
if exists('/etc/pki/tls/openssl.cnf'):
return '/etc/pki/tls/openssl.cnf'
@ -79,31 +86,8 @@ def get_certificate_paths():
def assure_tls_ca():
keyout, certout = ('/etc/confluent/tls/cakey.pem', '/etc/confluent/tls/cacert.pem')
if not os.path.exists(certout):
try:
os.makedirs('/etc/confluent/tls')
except OSError as e:
if e.errno != 17:
raise
sslcfg = get_openssl_conf_location()
tmphdl, tmpconfig = tempfile.mkstemp()
os.close(tmphdl)
shutil.copy2(sslcfg, tmpconfig)
subprocess.check_call(
['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out',
keyout])
try:
subj = '/CN=Confluent TLS Certificate authority ({0})'.format(socket.gethostname())
if len(subj) > 68:
subj = subj[:68]
with open(tmpconfig, 'a') as cfgfile:
cfgfile.write('\n[CACert]\nbasicConstraints = CA:true\n')
subprocess.check_call([
'openssl', 'req', '-new', '-x509', '-key', keyout, '-days',
'27300', '-out', certout, '-subj', subj,
'-extensions', 'CACert', '-config', tmpconfig
])
finally:
os.remove(tmpconfig)
#create_simple_ca(keyout, certout)
create_full_ca(certout)
fname = '/var/lib/confluent/public/site/tls/{0}.pem'.format(
collective.get_myname())
ouid = normalize_uid()
@ -133,6 +117,93 @@ def assure_tls_ca():
pass
os.symlink(certname, hashname)
def substitute_cfg(setting, key, val, newval, cfgfile, line):
if key.strip() == setting:
cfgfile.write(line.replace(val, newval) + '\n')
return True
return False
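# Build a full openssl 'ca' working directory (private key, index.txt, serial, newcerts) and a
# copy of openssl.cnf with dir/certificate/private_key pointed at /etc/confluent/tls/ca, then
# self-sign a long-lived CA certificate via 'openssl ca' so node certificates can later be
# signed against the same configuration.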
def create_full_ca(certout):
mkdirp('/etc/confluent/tls/ca/private')
keyout = '/etc/confluent/tls/ca/private/cakey.pem'
csrout = '/etc/confluent/tls/ca/ca.csr'
mkdirp('/etc/confluent/tls/ca/newcerts')
with open('/etc/confluent/tls/ca/index.txt', 'w') as idx:
pass
with open('/etc/confluent/tls/ca/index.txt.attr', 'w') as idx:
idx.write('unique_subject = no')
with open('/etc/confluent/tls/ca/serial', 'w') as srl:
srl.write('01')
sslcfg = get_openssl_conf_location()
newcfg = '/etc/confluent/tls/ca/openssl.cfg'
settings = {
'dir': '/etc/confluent/tls/ca',
'certificate': '$dir/cacert.pem',
'private_key': '$dir/private/cakey.pem',
'countryName': 'optional',
'stateOrProvinceName': 'optional',
'organizationName': 'optional',
}
subj = '/CN=Confluent TLS Certificate authority ({0})'.format(socket.gethostname())
if len(subj) > 68:
subj = subj[:68]
with open(sslcfg, 'r') as cfgin:
with open(newcfg, 'w') as cfgfile:
for line in cfgin.readlines():
cfg = line.split('#')[0]
if '=' in cfg:
key, val = cfg.split('=', 1)
for stg in settings:
if substitute_cfg(stg, key, val, settings[stg], cfgfile, line):
break
else:
cfgfile.write(line.strip() + '\n')
continue
cfgfile.write(line.strip() + '\n')
cfgfile.write('\n[CACert]\nbasicConstraints = CA:true\n\n[ca_confluent]\n')
subprocess.check_call(
['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out',
keyout])
subprocess.check_call(
['openssl', 'req', '-new', '-key', keyout, '-out', csrout, '-subj', subj])
subprocess.check_call(
['openssl', 'ca', '-config', newcfg, '-batch', '-selfsign',
'-extensions', 'CACert', '-extfile', newcfg,
'-notext', '-startdate',
'19700101010101Z', '-enddate', '21000101010101Z', '-keyfile',
keyout, '-out', '/etc/confluent/tls/ca/cacert.pem', '-in', csrout]
)
shutil.copy2('/etc/confluent/tls/ca/cacert.pem', certout)
#openssl ca -config openssl.cnf -selfsign -keyfile cakey.pem -startdate 20150214120000Z -enddate 20160214120000Z
#20160107071311Z -enddate 20170106071311Z
def create_simple_ca(keyout, certout):
try:
os.makedirs('/etc/confluent/tls')
except OSError as e:
if e.errno != 17:
raise
sslcfg = get_openssl_conf_location()
tmphdl, tmpconfig = tempfile.mkstemp()
os.close(tmphdl)
shutil.copy2(sslcfg, tmpconfig)
subprocess.check_call(
['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out',
keyout])
try:
subj = '/CN=Confluent TLS Certificate authority ({0})'.format(socket.gethostname())
if len(subj) > 68:
subj = subj[:68]
with open(tmpconfig, 'a') as cfgfile:
cfgfile.write('\n[CACert]\nbasicConstraints = CA:true\n')
subprocess.check_call([
'openssl', 'req', '-new', '-x509', '-key', keyout, '-days',
'27300', '-out', certout, '-subj', subj,
'-extensions', 'CACert', '-config', tmpconfig
])
finally:
os.remove(tmpconfig)
def create_certificate(keyout=None, certout=None):
if not keyout:
keyout, certout = get_certificate_paths()
@ -170,13 +241,21 @@ def create_certificate(keyout=None, certout=None):
'/CN={0}'.format(longname),
'-extensions', 'SAN', '-config', tmpconfig
])
subprocess.check_call([
'openssl', 'x509', '-req', '-in', csrout,
'-CA', '/etc/confluent/tls/cacert.pem',
'-CAkey', '/etc/confluent/tls/cakey.pem',
'-set_serial', serialnum, '-out', certout, '-days', '27300',
'-extfile', extconfig
])
if os.path.exists('/etc/confluent/tls/cakey.pem'):
subprocess.check_call([
'openssl', 'x509', '-req', '-in', csrout,
'-CA', '/etc/confluent/tls/cacert.pem',
'-CAkey', '/etc/confluent/tls/cakey.pem',
'-set_serial', serialnum, '-out', certout, '-days', '27300',
'-extfile', extconfig
])
else:
subprocess.check_call([
'openssl', 'ca', '-config', '/etc/confluent/tls/ca/openssl.cfg',
'-in', csrout, '-out', certout, '-batch', '-notext',
'-startdate', '19700101010101Z', '-enddate', '21000101010101Z',
'-extfile', extconfig
])
finally:
os.remove(tmpconfig)
os.remove(csrout)

View File

@ -42,10 +42,16 @@
# by passphrase and optionally TPM
import Cryptodome.Protocol.KDF as KDF
from Cryptodome.Cipher import AES
from Cryptodome.Hash import HMAC
from Cryptodome.Hash import SHA256
try:
import Cryptodome.Protocol.KDF as KDF
from Cryptodome.Cipher import AES
from Cryptodome.Hash import HMAC
from Cryptodome.Hash import SHA256
except ImportError:
import Crypto.Protocol.KDF as KDF
from Crypto.Cipher import AES
from Crypto.Hash import HMAC
from Crypto.Hash import SHA256
try:
import anydbm as dbm
except ModuleNotFoundError:
@ -559,6 +565,8 @@ def _load_dict_from_dbm(dpath, tdb):
currdict[tks] = cPickle.loads(dbe[tk]) # nosec
tk = dbe.nextkey(tk)
except dbm.error:
if os.path.exists(tdb):
raise
return
@ -1167,6 +1175,18 @@ def hook_new_configmanagers(callback):
pass
def attribute_name_is_invalid(attrname):
if attrname.startswith('custom.') or attrname.startswith('net.') or attrname.startswith('power.'):
return False
if '?' in attrname or '*' in attrname:
for attr in allattributes.node:
if fnmatch.fnmatch(attr, attrname):
return False
return True
attrname = _get_valid_attrname(attrname)
return attrname not in allattributes.node
class ConfigManager(object):
if os.name == 'nt':
_cfgdir = os.path.join(
@ -1275,6 +1295,9 @@ class ConfigManager(object):
raise Exception('Invalid Expression')
if attribute.startswith('secret.'):
raise Exception('Filter by secret attributes is not supported')
if attribute_name_is_invalid(attribute):
raise ValueError(
'{0} is not a valid attribute name'.format(attribute))
for node in nodes:
try:
currvals = [self._cfgstore['nodes'][node][attribute]['value']]
@ -2583,7 +2606,13 @@ class ConfigManager(object):
with _dirtylock:
dirtyglobals = copy.deepcopy(_cfgstore['dirtyglobals'])
del _cfgstore['dirtyglobals']
globalf = dbm.open(os.path.join(cls._cfgdir, "globals"), 'c', 384) # 0600
try:
globalf = dbm.open(os.path.join(cls._cfgdir, "globals"), 'c', 384) # 0600
except dbm.error:
if not fullsync:
raise
os.remove(os.path.join(cls._cfgdir, "globals"))
globalf = dbm.open(os.path.join(cls._cfgdir, "globals"), 'c', 384) # 0600
try:
for globalkey in dirtyglobals:
if globalkey in _cfgstore['globals']:
@ -2596,8 +2625,15 @@ class ConfigManager(object):
globalf.close()
if fullsync or 'collectivedirty' in _cfgstore:
if len(_cfgstore.get('collective', ())) > 1:
collectivef = dbm.open(os.path.join(cls._cfgdir, "collective"),
'c', 384)
try:
collectivef = dbm.open(os.path.join(cls._cfgdir, 'collective'),
'c', 384)
except dbm.error:
if not fullsync:
raise
os.remove(os.path.join(cls._cfgdir, 'collective'))
collectivef = dbm.open(os.path.join(cls._cfgdir, 'collective'),
'c', 384)
try:
if fullsync:
colls = _cfgstore['collective']
@ -2624,7 +2660,13 @@ class ConfigManager(object):
currdict = _cfgstore['main']
for category in currdict:
_mkpath(pathname)
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
except dbm.error:
if not fullsync:
raise
os.remove(os.path.join(pathname, category))
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
for ck in currdict[category]:
dbf[ck] = cPickle.dumps(currdict[category][ck], protocol=cPickle.HIGHEST_PROTOCOL)
@ -2644,7 +2686,13 @@ class ConfigManager(object):
currdict = _cfgstore['tenant'][tenant]
for category in dkdict:
_mkpath(pathname)
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
except dbm.error:
if not fullsync:
raise
os.remove(os.path.join(pathname, category))
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
for ck in dkdict[category]:
if ck not in currdict[category]:

View File

@ -811,9 +811,17 @@ def handle_dispatch(connection, cert, dispatch, peername):
operation = dispatch['operation']
pathcomponents = dispatch['path']
routespec = nested_lookup(noderesources, pathcomponents)
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata, nodes, dispatch['isnoderange'],
configmanager)
try:
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata, nodes, dispatch['isnoderange'],
configmanager)
except Exception as res:
with xmitlock:
_forward_rsp(connection, res)
keepalive.kill()
connection.sendall('\x00\x00\x00\x00\x00\x00\x00\x00')
connection.close()
return
plugroute = routespec.routeinfo
plugpath = None
nodesbyhandler = {}

View File

@ -29,6 +29,10 @@ import atexit
import confluent.auth as auth
import confluent.config.conf as conf
import confluent.config.configmanager as configmanager
try:
import anydbm as dbm
except ModuleNotFoundError:
import dbm
import confluent.consoleserver as consoleserver
import confluent.core as confluentcore
import confluent.httpapi as httpapi
@ -62,8 +66,10 @@ import os
import glob
import signal
import socket
import subprocess
import time
import traceback
import tempfile
import uuid
@ -232,8 +238,21 @@ def sanity_check():
assure_ownership('/etc/confluent/srvcert.pem')
def migrate_db():
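# Recover from a database format python3 cannot open (typically written by python2): verify
# python3 works, dump the configuration with python2, restore it with python3, then reinitialize.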
tdir = tempfile.mkdtemp()
subprocess.check_call(['python3', '-c', 'pass'])
subprocess.check_call(['python2', '/opt/confluent/bin/confluentdbutil', 'dump', '-u', tdir])
subprocess.check_call(['python3', '/opt/confluent/bin/confluentdbutil', 'restore', '-u', tdir])
subprocess.check_call(['rm', '-rf', tdir])
configmanager.init()
def run(args):
setlimits()
try:
configmanager.ConfigManager(None)
except dbm.error:
migrate_db()
try:
signal.signal(signal.SIGUSR1, dumptrace)
except AttributeError:

View File

@ -0,0 +1,232 @@
# Copyright 2022 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import confluent.util as util
import confluent.messages as msg
import confluent.exceptions as exc
import eventlet
import re
import hashlib
import json
import time
#eaton uses 'eval' rather than json, massage it to be valid json
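# e.g. a hypothetical reply of  {success: true, data: ['OK']}  becomes  {"success": true, "data": ["OK"]}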
def sanitize_json(data):
if not isinstance(data, str):
data = data.decode('utf8')
return re.sub(r'([^ {:,]*):', r'"\1":', data).replace("'", '"')
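# Answer the PDU's digest-style login challenge: data carries [realm, nonce, cnonce, uri,
# operation, counter] from cgi_authentication, and the MD5-based session key and response
# values returned here feed the cgi_authenticationChallenge request.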
def answer_challenge(username, password, data):
realm = data[0]
nonce = data[1].encode('utf8')
cnonce = data[2].encode('utf8')
uri = data[3].encode('utf8')
operation = data[4].encode('utf8')
incvalue = '{:08d}'.format(int(data[5])).encode('utf8')
a1 = hashlib.md5(':'.join([username, realm, password]).encode('utf8')).digest()
a1 = b':'.join([a1, nonce, cnonce])
skey = hashlib.md5(a1).hexdigest().encode('utf8')
ac2 = b'AUTHENTICATE:' + uri
s2c = hashlib.md5(ac2).hexdigest().encode('utf8')
rsp = hashlib.md5(b':'.join([skey, nonce, incvalue, cnonce, operation, s2c])).hexdigest().encode('utf8')
a2server = b':' + uri
s2server = hashlib.md5(a2server).hexdigest().encode('utf8')
s2rsp = hashlib.md5(b':'.join([skey, nonce, incvalue, cnonce, operation, s2server])).hexdigest().encode('utf8')
return {'sessionKey': skey.decode('utf8'), 'szResponse': rsp.decode('utf8'), 'szResponseValue': s2rsp.decode('utf8')}
try:
import Cookie
httplib = eventlet.import_patched('httplib')
except ImportError:
httplib = eventlet.import_patched('http.client')
import http.cookies as Cookie
# Delta PDU webserver always closes connection,
# replace conditionals with always close
class WebResponse(httplib.HTTPResponse):
def _check_close(self):
return True
class WebConnection(httplib.HTTPConnection):
response_class = WebResponse
def __init__(self, host):
httplib.HTTPConnection.__init__(self, host, 80)
self.cookies = {}
def getresponse(self):
try:
rsp = super(WebConnection, self).getresponse()
try:
hdrs = [x.split(':', 1) for x in rsp.msg.headers]
except AttributeError:
hdrs = rsp.msg.items()
for hdr in hdrs:
if hdr[0] == 'Set-Cookie':
c = Cookie.BaseCookie(hdr[1])
for k in c:
self.cookies[k] = c[k].value
except httplib.BadStatusLine:
self.broken = True
raise
return rsp
def request(self, method, url, body=None):
headers = {}
if body:
headers['Content-Length'] = len(body)
cookies = []
for cookie in self.cookies:
cookies.append('{0}={1}'.format(cookie, self.cookies[cookie]))
headers['Cookie'] = ';'.join(cookies)
headers['Host'] = 'pdu.cluster.net'
headers['Accept'] = '*/*'
headers['Accept-Language'] = 'en-US,en;q=0.9'
headers['Connection'] = 'close'
headers['Referer'] = 'http://pdu.cluster.net/setting_admin.htm'
return super(WebConnection, self).request(method, url, body, headers)
def grab_response(self, url, body=None, method=None):
if method is None:
method = 'GET' if body is None else 'POST'
if body:
self.request(method, url, body)
else:
self.request(method, url)
rsp = self.getresponse()
body = rsp.read()
return body, rsp.status
class PDUClient(object):
def __init__(self, pdu, configmanager):
self.node = pdu
self.configmanager = configmanager
self._token = None
self._wc = None
self.username = None
self.sessid = None
@property
def wc(self):
if self._wc:
return self._wc
targcfg = self.configmanager.get_node_attributes(self.node,
['hardwaremanagement.manager'],
decrypt=True)
targcfg = targcfg.get(self.node, {})
target = targcfg.get(
'hardwaremanagement.manager', {}).get('value', None)
if not target:
target = self.node
self._wc = WebConnection(target)
self.login(self.configmanager)
return self._wc
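# Log in to the PDU web interface: request a challenge from cgi_authentication, answer it via
# cgi_authenticationChallenge with the computed digest values, then validate the session with
# cgi_checkUserSession.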
def login(self, configmanager):
credcfg = configmanager.get_node_attributes(self.node,
['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword'],
decrypt=True)
credcfg = credcfg.get(self.node, {})
username = credcfg.get(
'secret.hardwaremanagementuser', {}).get('value', None)
passwd = credcfg.get(
'secret.hardwaremanagementpassword', {}).get('value', None)
if not isinstance(username, str):
username = username.decode('utf8')
if not isinstance(passwd, str):
passwd = passwd.decode('utf8')
if not username or not passwd:
raise Exception('Missing username or password')
b64user = base64.b64encode(username.encode('utf8')).decode('utf8')
rsp = self.wc.grab_response('/config/gateway?page=cgi_authentication&login={}&_dc={}'.format(b64user, int(time.time())))
rsp = json.loads(sanitize_json(rsp[0]))
parms = answer_challenge(username, passwd, rsp['data'][-1])
self.sessid = rsp['data'][0]
url = '/config/gateway?page=cgi_authenticationChallenge&sessionId={}&login={}&sessionKey={}&szResponse={}&szResponseValue={}&dc={}'.format(
rsp['data'][0],
b64user,
parms['sessionKey'],
parms['szResponse'],
parms['szResponseValue'],
int(time.time()),
)
rsp = self.wc.grab_response(url)
rsp = json.loads(sanitize_json(rsp[0]))
if rsp['success'] != True:
raise Exception('Failed to login to device')
rsp = self.wc.grab_response('/config/gateway?page=cgi_checkUserSession&sessionId={}&_dc={}'.format(self.sessid, int(time.time())))
def do_request(self, suburl):
wc = self.wc
url = '/config/gateway?page={}&sessionId={}&_dc={}'.format(suburl, self.sessid, int(time.time()))
return wc.grab_response(url)
def logout(self):
print(repr(self.do_request('cgi_logout')))
def get_outlet(self, outlet):
rsp = self.do_request('cgi_pdu_outlets')
data = sanitize_json(rsp[0])
data = json.loads(data)
data = data['data'][0]
for outdata in data:
outdata = outdata[0]
if outdata[0] == outlet:
return 'on' if outdata[3] else 'off'
return
def set_outlet(self, outlet, state):
rsp = self.do_request('cgi_pdu_outlets')
data = sanitize_json(rsp[0])
data = json.loads(data)
data = data['data'][0]
idx = 1
for outdata in data:
outdata = outdata[0]
if outdata[0] == outlet:
payload = "<SET_OBJECT><OBJECT name='PDU.OutletSystem.Outlet[{}].DelayBefore{}'>0</OBJECT>".format(idx, 'Startup' if state == 'on' else 'Shutdown')
rsp = self.wc.grab_response('/config/set_object_mass.xml?sessionId={}'.format(self.sessid), payload)
return
idx += 1
def retrieve(nodes, element, configmanager, inputdata):
if 'outlets' not in element:
for node in nodes:
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
return
for node in nodes:
gc = PDUClient(node, configmanager)
try:
state = gc.get_outlet(element[-1])
yield msg.PowerState(node=node, state=state)
finally:
gc.logout()
def update(nodes, element, configmanager, inputdata):
if 'outlets' not in element:
for node in nodes:
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
return
for node in nodes:
gc = PDUClient(node, configmanager)
newstate = inputdata.powerstate(node)
try:
gc.set_outlet(element[-1], newstate)
finally:
gc.logout()
eventlet.sleep(2)
for res in retrieve(nodes, element, configmanager, inputdata):
yield res

View File

@ -124,19 +124,21 @@ class SshShell(conapi.Console):
self.ssh.set_missing_host_key_policy(
HostKeyHandler(self.nodeconfig, self.node))
try:
self.datacallback('\r\nConnecting to {}...'.format(self.node))
self.ssh.connect(self.node, username=self.username,
password=self.password, allow_agent=False,
look_for_keys=False)
except paramiko.AuthenticationException:
except paramiko.AuthenticationException as e:
self.ssh.close()
self.inputmode = 0
self.username = b''
self.password = b''
self.datacallback('\r\nError connecting to {0}:\r\n {1}\r\n'.format(self.node, str(e)))
self.datacallback('\r\nlogin as: ')
return
except paramiko.ssh_exception.NoValidConnectionsError as e:
self.ssh.close()
self.datacallback(str(e))
self.datacallback('\r\nError connecting to {0}:\r\n {1}\r\n'.format(self.node, str(e)))
self.inputmode = 0
self.username = b''
self.password = b''
@ -162,10 +164,20 @@ class SshShell(conapi.Console):
'and permissions on /etc/ssh/*key)\r\n' \
'Press Enter to close...'
self.datacallback('\r\n' + warn)
return
except Exception as e:
self.ssh.close()
self.ssh.close()
self.inputmode = 0
self.username = b''
self.password = b''
warn = 'Error connecting to {0}:\r\n {1}\r\n'.format(self.node, str(e))
self.datacallback('\r\n' + warn)
self.datacallback('\r\nlogin as: ')
return
self.inputmode = 2
self.connected = True
self.datacallback('Connected\r\n')
self.shell = self.ssh.invoke_shell(width=self.width,
height=self.height)
self.rxthread = eventlet.spawn(self.recvdata)

View File

@ -120,6 +120,8 @@ def handle_request(env, start_response):
start_response('401 Unauthorized', [])
yield 'Unauthorized'
return
if not isinstance(eak, str):
eak = eak.decode('utf8')
salt = '$'.join(eak.split('$', 3)[:-1]) + '$'
if crypt.crypt(apikey, salt) != eak:
start_response('401 Unauthorized', [])

View File

@ -41,6 +41,10 @@ class _ShellHandler(consoleserver.ConsoleHandler):
return
#return super().feedbuffer(data)
def get_recent(self):
retdata, connstate = super(_ShellHandler, self).get_recent()
return '', connstate
def _got_disconnected(self):
self.connectstate = 'closed'
self._send_rcpts({'connectstate': self.connectstate})

View File

@ -13,13 +13,17 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
Prefix: %{_prefix}
BuildArch: noarch
Requires: confluent_vtbufferd
%if "%{dist}" == ".el7"
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python2-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
%else
%if "%{dist}" == ".el8"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
%else
%if "%{dist}" == ".el9"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
%else
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python2-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
%endif
%endif
%endif
Vendor: Jarrod Johnson <jjohnson2@lenovo.com>
@ -32,25 +36,17 @@ Server for console management and systems management aggregation
%setup -n %{name}-%{version} -n %{name}-%{version}
%build
%if "%{dist}" == ".el8"
python3 setup.py build
%else
%if "%{dist}" == ".el9"
python3 setup.py build
%else
%if "%{dist}" == ".el7"
python2 setup.py build
%endif
%else
python3 setup.py build
%endif
%install
%if "%{dist}" == ".el8"
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES.bare --install-purelib=/opt/confluent/lib/python --install-scripts=/opt/confluent/bin
%else
%if "%{dist}" == ".el9"
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES.bare --install-purelib=/opt/confluent/lib/python --install-scripts=/opt/confluent/bin
%else
%if "%{dist}" == ".el7"
python2 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES.bare --install-purelib=/opt/confluent/lib/python --install-scripts=/opt/confluent/bin
%endif
%else
python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES.bare --install-purelib=/opt/confluent/lib/python --install-scripts=/opt/confluent/bin
%endif
for file in $(grep confluent/__init__.py INSTALLED_FILES.bare); do
rm $RPM_BUILD_ROOT/$file
@ -101,3 +97,4 @@ rm -rf $RPM_BUILD_ROOT
%files -f INSTALLED_FILES
%license /opt/confluent/share/licenses/confluent_server/LICENSE
%defattr(-,root,root)
/opt/confluent

View File

@ -34,7 +34,7 @@ mkdir -p opt/confluent/lib/imgutil
mkdir -p opt/confluent/bin
mv imgutil opt/confluent/bin/
chmod a+x opt/confluent/bin/imgutil
mv ubuntu suse15 el7 el8 opt/confluent/lib/imgutil/
mv ubuntu suse15 el7 el9 el8 opt/confluent/lib/imgutil/
mkdir -p opt/confluent/share/licenses/confluent_imgutil
cp LICENSE opt/confluent/share/licenses/confluent_imgutil

View File

@ -0,0 +1,35 @@
dracut_install mktemp
dracut_install /lib64/libtss2-tcti-device.so.0
dracut_install tpm2_create tpm2_pcrread tpm2_createpolicy tpm2_createprimary
dracut_install tpm2_load tpm2_unseal tpm2_getcap tpm2_evictcontrol
dracut_install tpm2_pcrextend tpm2_policypcr tpm2_flushcontext tpm2_startauthsession
dracut_install curl openssl tar cpio gzip lsmod ethtool xz
dracut_install modprobe touch echo cut wc bash uniq grep ip hostname
dracut_install awk egrep dirname expr sort
dracut_install ssh sshd reboot parted mkfs mkfs.ext4 mkfs.xfs xfs_db mkswap
dracut_install efibootmgr uuidgen
dracut_install du df ssh-keygen scp clear dhclient
dracut_install /lib64/libnss_dns-2.28.so /lib64/libnss_dns.so.2
dracut_install /usr/lib64/libnl-3.so.200
dracut_install /etc/nsswitch.conf /etc/services /etc/protocols
dracut_install chmod whoami head tail basename tr
dracut_install /usr/sbin/arping /usr/sbin/dhclient-script ipcalc logger hostnamectl
inst /bin/bash /bin/sh
dracut_install /lib64/libfuse.so.2 /lib64/libfuse.so.2.9.7
dracut_install chown chroot dd expr kill parted rsync sort blockdev findfs insmod lvm
dracut_install /usr/lib/udev/rules.d/10-dm.rules /usr/sbin/dmsetup /usr/lib/udev/rules.d/95-dm-notify.rules
dracut_install /usr/lib/udev/rules.d/60-net.rules /lib/udev/rename_device /usr/lib/systemd/network/99-default.link
dracut_install /lib64/libpthread.so.0
dracut_install losetup # multipart support
#this would be nfs with lock, but not needed, go nolock
#dracut_install mount.nfs rpcbind rpc.statd /etc/netconfig sm-notify
#dracut_install mount.nfs /etc/netconfig
inst /usr/lib/dracut/modules.d/40network/net-lib.sh /lib/net-lib.sh
# network mount, and disk imaging helpers can come from a second stage
# this is narrowly focused on getting network up and fetching images
# and those images may opt to do something with cloning or whatever

View File

@ -0,0 +1,11 @@
#!/bin/bash
instmods nfsv3 nfs_acl nfsv4 dns_resolver lockd fscache sunrpc
instmods e1000 e1000e igb sfc mlx5_ib mlx5_core mlx4_en cxgb3 cxgb4 tg3 bnx2 bnx2x bna ixgb ixgbe qlge mptsas mpt2sas mpt3sas megaraid_sas ahci xhci-hcd sd_mod pmcraid be2net vfat ext3 ext4 usb_storage scsi_wait_scan ipmi_si ipmi_devintf qlcnic xfs
instmods nvme
instmods cdc_ether
instmods mptctl
instmods mlx4_ib mlx5_ib ib_umad ib_ipoib
instmods ice i40e hfi1 bnxt_en qed qede
instmods dm-mod dm-log raid0 raid1 raid10 raid456 dm-raid dm-thin-pool dm-crypt dm-snapshot linear dm-era
# nfs root and optionally gocryptfs
instmods fuse overlay squashfs loop zram

20
imgutil/el9/pkglist Normal file
View File

@ -0,0 +1,20 @@
dnf
hostname
irqbalance
less
sssd-client
NetworkManager
nfs-utils
numactl-libs
passwd
rootfiles
sudo
tuned
yum
initscripts
tpm2-tools
xfsprogs
e2fsprogs
fuse-libs
libnl3
chrony kernel net-tools nfs-utils openssh-server rsync tar util-linux python3 tar dracut dracut-network ethtool parted openssl dhclient openssh-clients bash vim-minimal rpm iputils lvm2 efibootmgr shim-x64.x86_64 grub2-efi-x64 attr

View File

@ -904,7 +904,7 @@ def fingerprint_source_suse(files, sourcepath, args):
def fingerprint_source_el(files, sourcepath, args):
for filen in files:
if '-release-8' in filen or '-release-7' in filen:
if '-release-8' in filen or '-release-7' in filen or '-release-9' in filen:
parts = filen.split('-')
osname = '_'.join(parts[:-3])
if osname == 'centos_linux':
@ -929,6 +929,7 @@ def fingerprint_source(sourcepath, args):
return oshandler
def fingerprint_host_el(args, hostpath='/'):
release = ''
if hostpath[0] != '/':
hostpath = os.path.join(os.getcwd(), hostpath)
try:
@ -936,7 +937,7 @@ def fingerprint_host_el(args, hostpath='/'):
ts = rpm.TransactionSet(hostpath)
rpms = ts.dbMatch('provides', 'system-release')
for inf in rpms:
if 'el8' not in inf.release and 'el7' not in inf.release:
if 'el8' not in inf.release and 'el7' not in inf.release and 'el9' not in inf.release:
continue
osname = inf.name
version = inf.version
@ -960,7 +961,7 @@ def fingerprint_host_el(args, hostpath='/'):
version = v
except subprocess.SubprocessError:
return None
if 'el8' not in release and 'el7' not in release:
if 'el8' not in release and 'el7' not in release and 'el9' not in release:
return None
osname = osname.replace('-release', '').replace('-', '_')
if osname == 'centos_linux':
@ -1267,6 +1268,11 @@ def gather_bootloader(outdir, rootpath='/'):
grubbin = os.path.join(rootpath, 'usr/lib64/efi/grub.efi')
if not os.path.exists(grubbin):
grubbin = os.path.join(rootpath, 'usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed')
if not os.path.exists(grubbin):
grubs = os.path.join(rootpath, 'boot/efi/EFI/*/grubx64.efi')
grubs = glob.glob(grubs)
if len(grubs) == 1:
grubbin = grubs[0]
shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grubx64.efi'))
shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grub.efi'))

10
misc/getpass.py Normal file
View File

@ -0,0 +1,10 @@
import confluent.config.configmanager as cfm
import sys
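# Print the decrypted secret.* attributes for the node named on the command line (must run
# where the confluent configuration store is accessible).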
c = cfm.ConfigManager(None)
cfg = c.get_node_attributes(sys.argv[1], 'secret.*', decrypt=True)
for node in cfg:
for attr in cfg[node]:
val = cfg[node][attr]['value']
if not isinstance(val, str):
val = val.decode('utf8')
print('{}: {}'.format(attr, val))

18
misc/swraid Normal file
View File

@ -0,0 +1,18 @@
DEVICES="/dev/sda /dev/sdb"
RAIDLEVEL=1
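# Adjust DEVICES and RAIDLEVEL for the target hardware, e.g. DEVICES="/dev/nvme0n1 /dev/nvme1n1" (hypothetical)
# If a metadata 1.0 array is already present, assume setup was done on a prior pass and skip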
mdadm --detail /dev/md*|grep 'Version : 1.0' >& /dev/null && exit 0
lvm vgchange -a n
mdadm -S -s
NUMDEVS=$(for dev in $DEVICES; do
echo wipefs -a $dev
done|wc -l)
for dev in $DEVICES; do
wipefs -a $dev
done
# must use older metadata format to leave disks looking normal for uefi
mdadm -C /dev/md/raid $DEVICES -n $NUMDEVS -e 1.0 -l $RAIDLEVEL
# shut and restart array to prime things for anaconda
mdadm -S -s
mdadm --assemble --scan
readlink /dev/md/raid|sed -e 's/.*\///' > /tmp/installdisk

15
misc/vroc Normal file
View File

@ -0,0 +1,15 @@
DEVICES="/dev/sda /dev/sdb"
RAIDLEVEL=1
mdadm --detail /dev/md* | grep imsm >& /dev/null && exit 0
lvm vgchange -a n
mdadm -S -s
NUMDEVS=$(for dev in $DEVICES; do
echo wipefs -a $dev
done|wc -l)
for dev in $DEVICES; do
wipefs -a $dev
done
mdadm -C /dev/md/imsm0 $DEVICES -n $NUMDEVS -e imsm
mdadm -C /dev/md/md0_0 /dev/md/imsm0 -n $NUMDEVS -l $RAIDLEVEL
mdadm -S -s
mdadm --assemble --scan