Mirror of https://github.com/xcat2/confluent.git (synced 2025-02-19 20:16:04 +00:00)

Commit 901f633848: Merge branch 'xcat2:master' into staging
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/libexec/platform-python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2015-2017 Lenovo
@@ -15,13 +15,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-__author__ = 'jjohnson2,alin37'
+__author__ = 'jjohnson2,alin37,wgrzeda'

import optparse
import os
import signal
import sys



try:
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
@@ -39,9 +41,12 @@ def main():
                " or: %prog [options] noderange <nodeattribute>...")
    argparser.add_option('-b', '--blame', action='store_true',
                         help='Show information about how attributes inherited')
+   argparser.add_option('-d', '--delim', metavar="STRING", default = "\n",
+                        help='Delimiter separating the values')
    (options, args) = argparser.parse_args()
    noderange=""
    nodelist=""
+   list = []
    try:
        noderange = args[0]
        nodelist = '/noderange/{0}/nodes/'.format(noderange)
@@ -61,7 +66,9 @@ def main():
            sys.stderr.write(res['error'] + '\n')
            exitcode = 1
        else:
-           print(res['item']['href'].replace('/', ''))
+           elem=(res['item']['href'].replace('/', ''))
+           list.append(elem)
+   print(options.delim.join(list))

    sys.exit(exitcode)
@@ -4,7 +4,7 @@ nodelist(8) -- List confluent nodes and their attributes
## SYNOPSIS

`nodelist <noderange>`
-`nodelist <noderange> [-b] <nodeattribute>...`
+`nodelist <noderange> [-b] [-d] {string} <nodeattribute>...`

## DESCRIPTION

@@ -24,7 +24,8 @@ all attributes that begin with `net.` and end with `switch`.

* `-b`, `--blame`:
  Annotate inherited and expression based attributes to show their base value.

+* `-d`, `--delim`:
+  Choose a delimiter to separate the values. Defaults to a newline.

## EXAMPLES
* Listing matching nodes of a simple noderange:
  `# nodelist n1-n4`
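* Listing nodes joined by a delimiter instead of one per line (a hypothetical
  invocation of the new `--delim` option; assumes nodes n1 through n4 exist):
  `# nodelist n1-n4 -d ,`
  `n1,n2,n3,n4`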
@@ -227,7 +227,7 @@ def get_apikey(nodename, hosts, errout=None):
    return apikey

class HTTPSClient(client.HTTPConnection, object):
-   def __init__(self, usejson=False, port=443, host=None, errout=None, phmac=None):
+   def __init__(self, usejson=False, port=443, host=None, errout=None, phmac=None, checkonly=False):
        self.phmac = phmac
        self.errout = None
        if errout:
@@ -291,7 +291,7 @@ class HTTPSClient(client.HTTPConnection, object):
        if self.phmac:
            with open(phmac, 'r') as hmacin:
                self.stdheaders['CONFLUENT_CRYPTHMAC'] = hmacin.read()
-       else:
+       elif not checkonly:
            self.stdheaders['CONFLUENT_APIKEY'] = get_apikey(node, self.hosts, errout=self.errout)
        if mgtiface:
            self.stdheaders['CONFLUENT_MGTIFACE'] = mgtiface
@@ -468,7 +468,7 @@ if __name__ == '__main__':
            outf.write(chunk)
            chunk = reader.read(16384)
        sys.exit(0)
-   client = HTTPSClient(usejson, errout=errout, phmac=phmac)
+   client = HTTPSClient(usejson, errout=errout, phmac=phmac, checkonly=checkonly)
    if waitfor:
        status = 201
        while status != waitfor:
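The new checkonly parameter lets a caller probe whether a confluent deployment server is reachable before any API key has been issued (the initramfs script later in this commit appears to do exactly that via `apiclient -c`). Distilled into a standalone sketch (a hypothetical helper that only restates the header-selection logic above; get_apikey is the function from the diff):

    def select_headers(phmac, checkonly, node, hosts):
        # an HMAC file wins; otherwise fetch an API key --
        # unless this is merely a reachability probe
        headers = {}
        if phmac:
            with open(phmac, 'r') as hmacin:
                headers['CONFLUENT_CRYPTHMAC'] = hmacin.read()
        elif not checkonly:
            headers['CONFLUENT_APIKEY'] = get_apikey(node, hosts)
        return headers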
@@ -8,6 +8,10 @@ import sys
import time
import shlex
import subprocess
+try:
+    import yaml
+except ImportError:
+    pass
try:
    from importlib.machinery import SourceFileLoader
    def load_source(mod, path):
@@ -107,6 +111,103 @@ def get_interface_name(iname, settings):
        return iname
    return None

class NetplanManager(object):
    def __init__(self, deploycfg):
        self.cfgbydev = {}
        self.read_connections()
        self.deploycfg = deploycfg

    def read_connections(self):
        for plan in glob.glob('/etc/netplan/*.y*ml'):
            with open(plan) as planfile:
                planinfo = yaml.safe_load(planfile)
            if not planinfo:
                continue
            nicinfo = planinfo.get('network', {}).get('ethernets', {})
            for devname in nicinfo:
                if devname == 'lo':
                    continue
                if 'gateway4' in nicinfo[devname]:
                    # normalize deprecated syntax on read in
                    gw4 = nicinfo[devname]['gateway4']
                    del nicinfo[devname]['gateway4']
                    routeinfo = nicinfo[devname].get('routes', [])
                    for ri in routeinfo:
                        if ri.get('via', None) == gw4 and ri.get('to', None) in ('default', '0.0.0.0/0', '0/0'):
                            break
                    else:
                        routeinfo.append({
                            'to': 'default',
                            'via': gw4
                        })
                    nicinfo[devname]['routes'] = routeinfo
                self.cfgbydev[devname] = nicinfo[devname]

    def apply_configuration(self, cfg):
        devnames = cfg['interfaces']
        if len(devnames) != 1:
            raise Exception('Multi-nic team/bonds not yet supported')
        stgs = cfg['settings']
        needcfgapply = False
        for devname in devnames:
            needcfgwrite = False
            if stgs['ipv6_method'] == 'static':
                curraddr = stgs['ipv6_address']
                currips = self.getcfgarrpath([devname, 'addresses'])
                if curraddr not in currips:
                    needcfgwrite = True
                    currips.append(curraddr)
            if stgs['ipv4_method'] == 'static':
                curraddr = stgs['ipv4_address']
                currips = self.getcfgarrpath([devname, 'addresses'])
                if curraddr not in currips:
                    needcfgwrite = True
                    currips.append(curraddr)
            gws = []
            gws.append(stgs.get('ipv4_gateway', None))
            gws.append(stgs.get('ipv6_gateway', None))
            for gwaddr in gws:
                if gwaddr:
                    cfgroutes = self.getcfgarrpath([devname, 'routes'])
                    for rinfo in cfgroutes:
                        if rinfo.get('via', None) == gwaddr:
                            break
                    else:
                        needcfgwrite = True
                        cfgroutes.append({'via': gwaddr, 'to': 'default'})
            dnsips = self.deploycfg.get('nameservers', [])
            dnsdomain = self.deploycfg.get('dnsdomain', '')
            if dnsips:
                currdnsips = self.getcfgarrpath([devname, 'nameservers', 'addresses'])
                for dnsip in dnsips:
                    if dnsip not in currdnsips:
                        needcfgwrite = True
                        currdnsips.append(dnsip)
            if dnsdomain:
                currdnsdomain = self.getcfgarrpath([devname, 'nameservers', 'search'])
                if dnsdomain not in currdnsdomain:
                    needcfgwrite = True
                    currdnsdomain.append(dnsdomain)
            if needcfgwrite:
                needcfgapply = True
                newcfg = {'network': {'version': 2, 'ethernets': {devname: self.cfgbydev[devname]}}}
                with open('/etc/netplan/{0}-confluentcfg.yaml'.format(devname), 'w') as planout:
                    planout.write(yaml.dump(newcfg))
        if needcfgapply:
            subprocess.call(['netplan', 'apply'])

    def getcfgarrpath(self, devpath):
        currptr = self.cfgbydev
        for k in devpath[:-1]:
            if k not in currptr:
                currptr[k] = {}
            currptr = currptr[k]
        if devpath[-1] not in currptr:
            currptr[devpath[-1]] = []
        return currptr[devpath[-1]]



class WickedManager(object):
    def __init__(self):
        self.teamidx = 0
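netplan deprecated the per-interface gateway4 key in favor of an explicit default route, which is why read_connections rewrites the old form on load. A minimal sketch of that normalization (requires PyYAML, which the script imports above; the interface name eth0 and the addresses are illustrative):

    import yaml

    old = yaml.safe_load(
        'network:\n'
        '  version: 2\n'
        '  ethernets:\n'
        '    eth0:\n'
        '      addresses: [192.168.1.10/24]\n'
        '      gateway4: 192.168.1.1\n')

    nic = old['network']['ethernets']['eth0']
    gw4 = nic.pop('gateway4')  # drop the deprecated key...
    routes = nic.setdefault('routes', [])
    if not any(r.get('via') == gw4 and r.get('to') in ('default', '0.0.0.0/0', '0/0')
               for r in routes):
        routes.append({'to': 'default', 'via': gw4})  # ...re-expressed as a route
    print(yaml.dump(old))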
@@ -253,6 +354,7 @@ class NetworkManager(object):

    def apply_configuration(self, cfg):
        cmdargs = {}
+       cmdargs['connection.autoconnect'] = 'yes'
        stgs = cfg['settings']
        cmdargs['ipv6.method'] = stgs.get('ipv6_method', 'link-local')
        if stgs.get('ipv6_address', None):
@@ -315,6 +417,7 @@ if __name__ == '__main__':
    myaddrs = apiclient.get_my_addresses()
    srvs, _ = apiclient.scan_confluents()
    doneidxs = set([])
+   dc = None
    for srv in srvs:
        try:
            s = socket.create_connection((srv, 443))
@@ -334,6 +437,9 @@ if __name__ == '__main__':
            continue
        status, nc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/netcfg')
        nc = json.loads(nc)
+       if not dc:
+           status, dc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/deploycfg2')
+           dc = json.loads(dc)
        iname = get_interface_name(idxmap[curridx], nc.get('default', {}))
        if iname:
            for iname in iname.split(','):
@@ -359,6 +465,8 @@ if __name__ == '__main__':
    if not netname_to_interfaces['default']['interfaces']:
        del netname_to_interfaces['default']
    rm_tmp_llas(tmpllas)
+   if os.path.exists('/usr/sbin/netplan'):
+       nm = NetplanManager(dc)
    if os.path.exists('/usr/bin/nmcli'):
        nm = NetworkManager(devtypes)
    elif os.path.exists('/usr/sbin/wicked'):
@@ -42,7 +42,7 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreo
    mv ../addons.cpio .
    cd ..
done
-for os in el7 el8 suse15 el9 ubuntu20.04; do
+for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04; do
    mkdir ${os}disklessout
    cd ${os}disklessout
    if [ -d ../${os}bin ]; then
@@ -11,6 +11,13 @@ import struct
import sys
import subprocess

def get_partname(devname, idx):
    if devname[-1] in '0123456789':
        return '{}p{}'.format(devname, idx)
    else:
        return '{}{}'.format(devname, idx)


def get_next_part_meta(img, imgsize):
    if img.tell() == imgsize:
        return None
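get_partname encodes the kernel's partition naming rule: when a base device name ends in a digit (nvme, mmcblk and similar), a p separates the device name from the partition number. A quick check of the helper above:

    def get_partname(devname, idx):
        # names ending in a digit get a 'p' separator
        if devname[-1] in '0123456789':
            return '{}p{}'.format(devname, idx)
        return '{}{}'.format(devname, idx)

    assert get_partname('/dev/sda', 1) == '/dev/sda1'
    assert get_partname('/dev/nvme0n1', 1) == '/dev/nvme0n1p1'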
@@ -53,10 +60,13 @@ class PartedRunner():
    def __init__(self, disk):
        self.disk = disk

-   def run(self, command):
+   def run(self, command, check=True):
        command = command.split()
        command = ['parted', '-a', 'optimal', '-s', self.disk] + command
-       return subprocess.check_output(command).decode('utf8')
+       if check:
+           return subprocess.check_output(command).decode('utf8')
+       else:
+           return subprocess.run(command, stdout=subprocess.PIPE).stdout.decode('utf8')

def fixup(rootdir, vols):
    devbymount = {}
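The check flag matters because subprocess.check_output raises CalledProcessError on a nonzero exit, while subprocess.run without check=True simply returns whatever output was captured; check=False is what lets the later 'unit s print' call tolerate an oddly-partitioned disk. A standalone illustration using stand-in commands:

    import subprocess

    try:
        subprocess.check_output(['false'])          # nonzero exit raises...
    except subprocess.CalledProcessError:
        print('check_output raised, as check=True behaves')

    out = subprocess.run(['sh', '-c', 'echo partial; exit 1'],
                         stdout=subprocess.PIPE).stdout.decode('utf8')
    print(out)                                      # ...run still yields 'partial'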
@@ -166,6 +176,8 @@ def fixup(rootdir, vols):
            partnum = re.search('(\d+)$', targdev).group(1)
            targblock = re.search('(.*)\d+$', targdev).group(1)
    if targblock:
+       if targblock.endswith('p') and 'nvme' in targblock:
+           targblock = targblock[:-1]
        shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
        shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
        subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
@@ -224,7 +236,8 @@ def install_to_disk(imgpath):
        instdisk = diskin.read()
    instdisk = '/dev/' + instdisk
    parted = PartedRunner(instdisk)
-   dinfo = parted.run('unit s print')
+   # do this safer, unit s print might bomb
+   dinfo = parted.run('unit s print', check=False)
    dinfo = dinfo.split('\n')
    sectors = 0
    sectorsize = 0
@@ -258,7 +271,7 @@ def install_to_disk(imgpath):
        if end > sectors:
            end = sectors
        parted.run('mkpart primary {}s {}s'.format(curroffset, end))
-       vol['targetdisk'] = instdisk + '{0}'.format(volidx)
+       vol['targetdisk'] = get_partname(instdisk , volidx)
        curroffset += size + 1
    if not lvmvols:
        if swapsize:
@@ -268,10 +281,10 @@ def install_to_disk(imgpath):
            if end > sectors:
                end = sectors
            parted.run('mkpart swap {}s {}s'.format(curroffset, end))
-           subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
+           subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)])
    else:
        parted.run('mkpart lvm {}s 100%'.format(curroffset))
-       lvmpart = instdisk + '{}'.format(volidx + 1)
+       lvmpart = get_partname(instdisk, volidx + 1)
        subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
        subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
        vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
@@ -125,4 +125,6 @@ if grep installtodisk /proc/cmdline > /dev/null; then
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
+mv /lib/firmware /lib/firmware-ramfs
+ln -s /sysroot/lib/firmware /lib/firmware
exec /opt/confluent/bin/start_root
@@ -7,6 +7,7 @@ if [ -f /tmp/dd_disk ]; then
        fi
    done
fi
+shutdownnic=""
oum=$(umask)
umask 0077
mkdir -p /etc/confluent
@@ -26,6 +27,13 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
    deploysrvs=$(sed -n '/^deploy_servers:/, /^[^-]/p' cnflnt.yml |grep ^-|sed -e 's/^- //'|grep -v :)

    nodename=$(grep ^nodename: cnflnt.yml|awk '{print $2}')
+   ln -s /opt/confluent/bin/clortho /opt/confluent/bin/genpasshmac
+   hmackeyfile=/tmp/hmackeyfile
+   passfile=/etc/confluent/confluent.apikey
+   passcrypt=/tmp/passcrypt
+   hmacfile=/tmp/hmacfile
+   echo -n $(grep ^apitoken: cnflnt.yml|awk '{print $2}') > $hmackeyfile;
+   /opt/confluent/bin/genpasshmac $passfile $passcrypt $hmacfile $hmackeyfile
    echo "NODENAME: "$nodename > /etc/confluent/confluent.info
    for dsrv in $deploysrvs; do
        echo 'MANAGER: '$dsrv >> /etc/confluent/confluent.info
@@ -38,6 +46,7 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
        udevadm info $i | grep ID_NET_DRIVER=cdc_ether > /dev/null && continue
        ip link set $(basename $i) up
    done
    sleep 10
+   usedhcp=0
    for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do
        if [ "$autoconfigmethod" = "dhcp" ]; then
@@ -59,15 +68,17 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
            v4nm=$(grep ipv4_netmask: $tcfg)
            v4nm=${v4nm#ipv4_netmask: }
            TESTSRV=$(python /opt/confluent/bin/apiclient -c 2> /dev/null)
+           if [ ! -z "$TESTSRV" ]; then
+               python /opt/confluent/bin/apiclient -p $hmacfile /confluent-api/self/registerapikey $passcrypt
+               mgr=$TESTSRV
+               ifname=$NICGUESS
+               shutdownnic=$ifname
+               break
+           fi
            if [ ! -z "$v4gw" ]; then
                ip route del default via $v4gw
            fi
            ip -4 addr flush dev $NICGUESS
-           if [ ! -z "$TESTSRV" ]; then
-               mgr=$TESTSRV
-               ifname=$NICGUESS
-               break
-           fi
        fi
    done
fi
@@ -87,13 +98,18 @@ elif [ -z "$ifname" ]; then
    grep ^EXTMGRINFO: /etc/confluent/confluent.info || return 0 # Do absolutely nothing if no data at all yet
    echo -n "" > /etc/cmdline.d/01-confluent.conf
else
-   echo -n ip=$v4addr::$v4gw:$v4nm:$hostname:$ifname:none > /etc/cmdline.d/01-confluent.conf
+   echo ip=$v4addr::$v4gw:$v4nm:$hostname:$ifname:none > /etc/cmdline.d/01-confluent.conf
fi
+python /opt/confluent/bin/apiclient /confluent-api/self/deploycfg > /etc/confluent/confluent.deploycfg
+if [ ! -z "$shutdownnic" ]; then
+   if [ ! -z "$v4gw" ]; then
+       ip route del default via $v4gw
+   fi
+   ip -4 addr flush dev $shutdownnic
+fi
+echo -n "" > /tmp/confluent.initq
# restart cmdline
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
#TODO: blkid --label <whatever> to find mounted api
-python /opt/confluent/bin/apiclient /confluent-api/self/deploycfg > /etc/confluent/confluent.deploycfg
-echo -n "" > /tmp/confluent.initq
if [ -z "$ifname" ]; then
    ifidx=$(cat /tmp/confluent.ifidx)
    ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
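The genpasshmac sequence keeps the shared apitoken off the wire: the token from cnflnt.yml acts as an HMAC key over a crypted one-time password, and the server can verify the HMAC with its own copy of the token before accepting the key registration. Conceptually something like the following (a loose sketch; the actual formats produced by clortho/genpasshmac are not shown in this commit and may differ):

    import hmac, hashlib

    apitoken = b'token-from-cnflnt.yml'                 # hypothetical stand-in values
    passcrypt = b'$6$salt$crypted-one-time-password'

    mac = hmac.new(apitoken, passcrypt, hashlib.sha256).hexdigest()
    # passcrypt plus this HMAC is what /confluent-api/self/registerapikey
    # receives; the server recomputes the HMAC to authenticate the request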
@@ -189,8 +189,15 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
+linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}')
+if [ "$linktype" = link/infiniband ]; then
+    linktype="infiniband"
+else
+    linktype="ethernet"
+fi
+echo type=$linktype >> /run/NetworkManager/system-connections/$ifname.nmconnection

cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
-type=ethernet
autoconnect-retries=1
EOC
echo interface-name=$ifname >> /run/NetworkManager/system-connections/$ifname.nmconnection
@@ -199,9 +206,6 @@ multi-connect=1
permissions=
wait-device-timeout=60000

-[ethernet]
-mac-address-blacklist=
-
EOC
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
auto6configmethod=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
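The linktype probe above scrapes ip link output; the same distinction is available directly from sysfs, where the ARPHRD link-layer type is 1 for Ethernet and 32 for InfiniBand. An equivalent check in Python (hypothetical interface name; not part of the commit):

    def link_type(ifname):
        # /sys/class/net/<if>/type holds the ARPHRD_* constant
        with open('/sys/class/net/{}/type'.format(ifname)) as t:
            arphrd = int(t.read())
        return 'infiniband' if arphrd == 32 else 'ethernet'

    print(link_type('eth0'))  # hypothetical interface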
@@ -13,6 +13,12 @@ import subprocess

bootuuid = None

def get_partname(devname, idx):
    if devname[-1] in '0123456789':
        return '{}p{}'.format(devname, idx)
    else:
        return '{}{}'.format(devname, idx)

def get_next_part_meta(img, imgsize):
    if img.tell() == imgsize:
        return None
@@ -202,6 +208,8 @@ def fixup(rootdir, vols):
            partnum = re.search('(\d+)$', targdev).group(1)
            targblock = re.search('(.*)\d+$', targdev).group(1)
    if targblock:
+       if 'nvme' in targblock and targblock[-1] == 'p':
+           targblock = targblock[:-1]
        shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
        shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
        subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
@@ -295,7 +303,7 @@ def install_to_disk(imgpath):
        if end > sectors:
            end = sectors
        parted.run('mkpart primary {}s {}s'.format(curroffset, end))
-       vol['targetdisk'] = instdisk + '{0}'.format(volidx)
+       vol['targetdisk'] = get_partname(instdisk, volidx)
        curroffset += size + 1
    if not lvmvols:
        if swapsize:
@@ -305,10 +313,10 @@ def install_to_disk(imgpath):
            if end > sectors:
                end = sectors
            parted.run('mkpart swap {}s {}s'.format(curroffset, end))
-           subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
+           subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)])
    else:
        parted.run('mkpart lvm {}s 100%'.format(curroffset))
-       lvmpart = instdisk + '{}'.format(volidx + 1)
+       lvmpart = get_partname(instdisk, volidx + 1)
        subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
        subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
        vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
@@ -127,5 +127,7 @@ if grep installtodisk /proc/cmdline > /dev/null; then
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
+mv /lib/firmware /lib/firmware-ramfs
+ln -s /sysroot/lib/firmware /lib/firmware
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
exec /opt/confluent/bin/start_root
@@ -33,15 +33,7 @@
reboot

%packages
-@^minimal-environment
-#-kernel-uek # This can opt out of the UEK for the relevant distribution
-bind-utils
-chrony
-pciutils
-python3
-rsync
-tar
--iwl*-firmware
%include /tmp/pkglist
+%include /tmp/addonpackages
%include /tmp/cryptpkglist
%end
confluent_osdeploy/el8/profiles/default/packagelist (new file, 9 lines)
@@ -0,0 +1,9 @@
@^minimal-environment
#-kernel-uek # This can opt out of the UEK for the relevant distribution
bind-utils
chrony
pciutils
python3
rsync
tar
-iwl*-firmware
confluent_osdeploy/el8/profiles/default/partitioning (new file, 4 lines)
@@ -0,0 +1,4 @@
clearpart --all --initlabel
ignoredisk --only-use %%INSTALLDISK%%
autopart --nohome %%LUKSHOOK%%
@@ -87,6 +87,7 @@ done
cryptboot=$(grep ^encryptboot: /etc/confluent/confluent.deploycfg | awk '{print $2}')
LUKSPARTY=''
touch /tmp/cryptpkglist
touch /tmp/pkglist
+touch /tmp/addonpackages
if [ "$cryptboot" == "tpm2" ]; then
    LUKSPARTY="--encrypted --passphrase=$(cat /etc/confluent/confluent.apikey)"
@@ -102,15 +103,18 @@ confluentpython /opt/confluent/bin/apiclient /confluent-public/os/$confluent_pro
run_remote pre.custom
run_remote_parts pre.d
confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile/kickstart -o /tmp/kickstart.base
+if grep '^%include /tmp/pkglist' /tmp/kickstart.* > /dev/null; then
+    confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile/packagelist -o /tmp/pkglist
+fi
grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || touch /tmp/installdisk
if [ ! -e /tmp/installdisk ]; then
    run_remote_python getinstalldisk
fi
+confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile/partitioning -o /tmp/partitioning.template
grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk
if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then
-   echo clearpart --all --initlabel >> /tmp/partitioning
-   echo ignoredisk --only-use $(cat /tmp/installdisk) >> /tmp/partitioning
-   echo autopart --nohome $LUKSPARTY >> /tmp/partitioning
+   INSTALLDISK=$(cat /tmp/installdisk)
+   sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e s/%%LUKSHOOK%%/$LUKSPARTY/ /tmp/partitioning.template > /tmp/partitioning
    dd if=/dev/zero of=/dev/$(cat /tmp/installdisk) bs=1M count=1 >& /dev/null
    vgchange -a n >& /dev/null
fi
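With this change the profile's partitioning file becomes a template: %%INSTALLDISK%% and %%LUKSHOOK%% are placeholders that pre.sh fills in with sed. The same substitution expressed in Python, using the template added above (the disk name and the empty LUKS hook are illustrative values):

    template = ('clearpart --all --initlabel\n'
                'ignoredisk --only-use %%INSTALLDISK%%\n'
                'autopart --nohome %%LUKSHOOK%%\n')
    rendered = (template.replace('%%INSTALLDISK%%', 'sda')   # from /tmp/installdisk
                        .replace('%%LUKSHOOK%%', ''))        # empty unless encryptboot is set
    print(rendered)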
@@ -154,8 +154,14 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
+linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}')
+if [ "$linktype" = link/infiniband ]; then
+    linktype="infiniband"
+else
+    linktype="ethernet"
+fi
+echo type=$linktype >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
-type=ethernet
autoconnect-retries=1
EOC
echo interface-name=$ifname >> /run/NetworkManager/system-connections/$ifname.nmconnection
@@ -164,9 +170,6 @@ multi-connect=1
permissions=
wait-device-timeout=60000

-[ethernet]
-mac-address-blacklist=
-
EOC
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
auto6configmethod=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
@@ -13,6 +13,12 @@ import subprocess

bootuuid = None

def get_partname(devname, idx):
    if devname[-1] in '0123456789':
        return '{}p{}'.format(devname, idx)
    else:
        return '{}{}'.format(devname, idx)

def get_next_part_meta(img, imgsize):
    if img.tell() == imgsize:
        return None
@@ -202,6 +208,8 @@ def fixup(rootdir, vols):
            partnum = re.search('(\d+)$', targdev).group(1)
            targblock = re.search('(.*)\d+$', targdev).group(1)
    if targblock:
+       if targblock.endswith('p') and 'nvme' in targblock:
+           targblock = targblock[:-1]
        shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
        shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
        subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
@@ -295,7 +303,7 @@ def install_to_disk(imgpath):
        if end > sectors:
            end = sectors
        parted.run('mkpart primary {}s {}s'.format(curroffset, end))
-       vol['targetdisk'] = instdisk + '{0}'.format(volidx)
+       vol['targetdisk'] = get_partname(instdisk, volidx)
        curroffset += size + 1
    if not lvmvols:
        if swapsize:
@@ -305,10 +313,10 @@ def install_to_disk(imgpath):
            if end > sectors:
                end = sectors
            parted.run('mkpart swap {}s {}s'.format(curroffset, end))
-           subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
+           subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)])
    else:
        parted.run('mkpart lvm {}s 100%'.format(curroffset))
-       lvmpart = instdisk + '{}'.format(volidx + 1)
+       lvmpart = get_partname(instdisk, volidx + 1)
        subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
        subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
        vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
@@ -127,5 +127,7 @@ if grep installtodisk /proc/cmdline > /dev/null; then
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
+mv /lib/firmware /lib/firmware-ramfs
+ln -s /sysroot/lib/firmware /lib/firmware
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
exec /opt/confluent/bin/start_root
@@ -138,4 +138,6 @@ if grep installtodisk /proc/cmdline > /dev/null; then
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
+mv /lib/firmware /lib/firmware-ramfs
+ln -s /sysroot/lib/firmware /lib/firmware
exec /opt/confluent/bin/start_root
@@ -107,7 +107,7 @@ if [ "$v6meth" = static ]; then
        ip route add default via $v6gw
    fi
fi
-v4meth=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg|awk '{print $2}')
+v4meth=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg|awk '{print $2}')
if [ "$v4meth" = static ]; then
    v4addr=$(grep ^ipv4_address: /etc/confluent/confluent.deploycfg | awk '{print $2}')
    v4prefix=$(grep ^prefix: /etc/confluent/confluent.deploycfg | awk '{print $2}')
@@ -0,0 +1,11 @@
[Unit]
Description=First Boot Process
Requires=network-online.target
After=network-online.target

[Service]
ExecStart=/opt/confluent/bin/firstboot.sh

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,40 @@
#!/bin/bash

# This script is executed on the first boot after install has
# completed. It is best to edit the middle of the file as
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.

HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done

if [ ! -f /etc/confluent/firstboot.ran ]; then
    touch /etc/confluent/firstboot.ran

    run_remote firstboot.custom
    # Firstboot scripts may be placed into firstboot.d, e.g. firstboot.d/01-firstaction.sh, firstboot.d/02-secondaction.sh
    run_remote_parts firstboot.d

    # Induce execution of remote configuration, e.g. ansible plays in ansible/firstboot.d/
    run_remote_config firstboot.d
fi

curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
) &
tail --pid $! -F /var/log/confluent/confluent-firstboot.log > /dev/console
@@ -0,0 +1,93 @@
import subprocess
import os

class DiskInfo(object):
    def __init__(self, devname):
        self.name = devname
        self.wwn = None
        self.path = None
        self.model = ''
        self.size = 0
        self.driver = None
        self.mdcontainer = ''
        devnode = '/dev/{0}'.format(devname)
        qprop = subprocess.check_output(
            ['udevadm', 'info', '--query=property', devnode])
        if not isinstance(qprop, str):
            qprop = qprop.decode('utf8')
        for prop in qprop.split('\n'):
            if '=' not in prop:
                continue
            k, v = prop.split('=', 1)
            if k == 'DEVTYPE' and v != 'disk':
                raise Exception('Not a disk')
            elif k == 'DM_NAME':
                raise Exception('Device Mapper')
            elif k == 'ID_MODEL':
                self.model = v
            elif k == 'DEVPATH':
                self.path = v
            elif k == 'ID_WWN':
                self.wwn = v
            elif k == 'MD_CONTAINER':
                self.mdcontainer = v
        attrs = subprocess.check_output(['udevadm', 'info', '-a', devnode])
        if not isinstance(attrs, str):
            attrs = attrs.decode('utf8')
        for attr in attrs.split('\n'):
            if '==' not in attr:
                continue
            k, v = attr.split('==', 1)
            k = k.strip()
            if k == 'ATTRS{size}':
                self.size = v.replace('"', '')
            elif (k == 'DRIVERS' and not self.driver
                    and v not in ('"sd"', '""')):
                self.driver = v.replace('"', '')
        if not self.driver and 'imsm' not in self.mdcontainer:
            raise Exception("No driver detected")
        if os.path.exists('/sys/block/{0}/size'.format(self.name)):
            with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
                self.size = int(sizesrc.read()) * 512
        if int(self.size) < 536870912:
            raise Exception("Device too small for install")

    @property
    def priority(self):
        if self.model.lower() in ('m.2 nvme 2-bay raid kit', 'thinksystem_m.2_vd', 'thinksystem m.2', 'thinksystem_m.2'):
            return 0
        if 'imsm' in self.mdcontainer:
            return 1
        if self.driver == 'ahci':
            return 2
        if self.driver.startswith('megaraid'):
            return 3
        if self.driver.startswith('mpt'):
            return 4
        return 99

    def __repr__(self):
        return repr({
            'name': self.name,
            'path': self.path,
            'wwn': self.wwn,
            'driver': self.driver,
            'size': self.size,
            'model': self.model,
        })


def main():
    disks = []
    for disk in sorted(os.listdir('/sys/class/block')):
        try:
            disk = DiskInfo(disk)
            disks.append(disk)
        except Exception as e:
            print("Skipping {0}: {1}".format(disk, str(e)))
    nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
    if nd:
        open('/tmp/installdisk', 'w').write(nd[0])

if __name__ == '__main__':
    main()
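Selection is a plain sort on the priority property: known M.2 boot devices (0) beat imsm md containers (1), AHCI (2), MegaRAID (3) and mpt (4), with 99 as the catch-all; the best-ranked name is written to /tmp/installdisk. A toy run of the ordering (names and ranks illustrative):

    disks = {'sda': 3, 'sdb': 2, 'nvme0n1': 0}     # name -> priority, per the table above
    best = sorted(disks, key=lambda name: disks[name])[0]
    print(best)   # nvme0n1 -- the M.2 device outranks megaraid (3) and ahci (2)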
@@ -0,0 +1,426 @@
#!/usr/bin/python3
import glob
import json
import os
import re
import time
import shutil
import socket
import stat
import struct
import sys
import subprocess

bootuuid = None


def get_partname(devname, idx):
    if devname[-1] in '0123456789':
        return '{}p{}'.format(devname, idx)
    else:
        return '{}{}'.format(devname, idx)

def get_next_part_meta(img, imgsize):
    if img.tell() == imgsize:
        return None
    pathlen = struct.unpack('!H', img.read(2))[0]
    mountpoint = img.read(pathlen).decode('utf8')
    jsonlen = struct.unpack('!I', img.read(4))[0]
    metadata = json.loads(img.read(jsonlen).decode('utf8'))
    img.seek(16, 1)  # skip the two 64-bit values we don't use, they are in json
    nextlen = struct.unpack('!H', img.read(2))[0]
    img.seek(nextlen, 1)  # skip filesystem type
    nextlen = struct.unpack('!H', img.read(2))[0]
    img.seek(nextlen, 1)  # skip orig devname (redundant with json)
    nextlen = struct.unpack('!H', img.read(2))[0]
    img.seek(nextlen, 1)  # skip padding
    nextlen = struct.unpack('!Q', img.read(8))[0]
    img.seek(nextlen, 1)  # go to next section
    return metadata

def get_multipart_image_meta(img):
    img.seek(0, 2)
    imgsize = img.tell()
    img.seek(16)
    seekamt = img.read(1)
    img.seek(struct.unpack('B', seekamt)[0], 1)
    partinfo = get_next_part_meta(img, imgsize)
    while partinfo:
        yield partinfo
        partinfo = get_next_part_meta(img, imgsize)

def get_image_metadata(imgpath):
    with open(imgpath, 'rb') as img:
        header = img.read(16)
        if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1':
            for md in get_multipart_image_meta(img):
                yield md
        else:
            raise Exception('Installation from single part image not supported')

class PartedRunner():
    def __init__(self, disk):
        self.disk = disk

    def run(self, command, check=True):
        command = command.split()
        command = ['parted', '-a', 'optimal', '-s', self.disk] + command
        if check:
            return subprocess.check_output(command).decode('utf8')
        else:
            return subprocess.run(command, stdout=subprocess.PIPE).stdout.decode('utf8')

def fixup(rootdir, vols):
    devbymount = {}
    for vol in vols:
        devbymount[vol['mount']] = vol['targetdisk']
    fstabfile = os.path.join(rootdir, 'etc/fstab')
    with open(fstabfile) as tfile:
        fstab = tfile.read().split('\n')
        while not fstab[0]:
            fstab = fstab[1:]
    if os.path.exists(os.path.join(rootdir, '.autorelabel')):
        os.unlink(os.path.join(rootdir, '.autorelabel'))
    with open(fstabfile, 'w') as tfile:
        for tab in fstab:
            entry = tab.split()
            if tab.startswith('#ORIGFSTAB#'):
                if entry[1] in devbymount:
                    targetdev = devbymount[entry[1]]
                    if targetdev.startswith('/dev/localstorage/'):
                        entry[0] = targetdev
                    else:
                        uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8')
                        uuid = uuid.strip()
                        entry[0] = 'UUID={}'.format(uuid)
                elif entry[2] == 'swap':
                    entry[0] = '/dev/mapper/localstorage-swap'
                entry[0] = entry[0].ljust(42)
                entry[1] = entry[1].ljust(16)
                entry[3] = entry[3].ljust(28)
                tab = '\t'.join(entry)
            tfile.write(tab + '\n')
    with open(os.path.join(rootdir, 'etc/hostname'), 'w') as nameout:
        nameout.write(socket.gethostname() + '\n')
    selinuxconfig = os.path.join(rootdir, 'etc/selinux/config')
    policy = None
    if os.path.exists(selinuxconfig):
        with open(selinuxconfig) as cfgin:
            sec = cfgin.read().split('\n')
            for l in sec:
                l = l.split('#', 1)[0]
                if l.startswith('SELINUXTYPE='):
                    _, policy = l.split('=')
    for sshkey in glob.glob(os.path.join(rootdir, 'etc/ssh/*_key*')):
        os.unlink(sshkey)
    for sshkey in glob.glob('/etc/ssh/*_key*'):
        newkey = os.path.join(rootdir, sshkey[1:])
        shutil.copy2(sshkey, newkey)
        finfo = os.stat(sshkey)
        os.chown(newkey, finfo[stat.ST_UID], finfo[stat.ST_GID])

    # Will use confignet to handle networking for ubuntu
    shutil.rmtree(os.path.join(rootdir, 'etc/confluent/'))
    shutil.copytree('/etc/confluent', os.path.join(rootdir, 'etc/confluent'))
    if policy:
        sys.stdout.write('Applying SELinux labeling...')
        sys.stdout.flush()
        subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'etc')])
        subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'opt')])
        sys.stdout.write('Done\n')
        sys.stdout.flush()
    for metafs in ('proc', 'sys', 'dev'):
        subprocess.check_call(['mount', '-o', 'bind', '/{}'.format(metafs), os.path.join(rootdir, metafs)])
    if os.path.exists(os.path.join(rootdir, 'etc/lvm/devices/system.devices')):
        os.remove(os.path.join(rootdir, 'etc/lvm/devices/system.devices'))
    grubsyscfg = os.path.join(rootdir, 'etc/sysconfig/grub')
    if not os.path.exists(grubsyscfg):
        grubsyscfg = os.path.join(rootdir, 'etc/default/grub')
    with open(grubsyscfg) as defgrubin:
        defgrub = defgrubin.read().split('\n')
    with open(grubsyscfg, 'w') as defgrubout:
        for gline in defgrub:
            gline = gline.split()
            newline = []
            for ent in gline:
                if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'):
                    continue
                newline.append(ent)
            defgrubout.write(' '.join(newline) + '\n')
    grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/').replace('//', '/')
    grubcfg = grubcfg.split('\n')
    if not grubcfg[-1]:
        grubcfg = grubcfg[:-1]
    if len(grubcfg) == 1:
        grubcfg = grubcfg[0]
    else:
        for gcfg in grubcfg:
            rgcfg = os.path.join(rootdir, gcfg[1:])  # gcfg has a leading / to get rid of
            if os.stat(rgcfg).st_size > 256:
                grubcfg = gcfg
            else:
                with open(rgcfg, 'r') as gin:
                    tgrubcfg = gin.read()
                tgrubcfg = tgrubcfg.split('\n')
                if 'search --no-floppy --fs-uuid --set=dev' in tgrubcfg[0]:
                    tgrubcfg[0] = 'search --no-floppy --fs-uuid --set=dev ' + bootuuid
                elif 'search.fs_uuid ' in tgrubcfg[0] and 'root' in tgrubcfg[0]:
                    tgrubcfg[0] = 'search.fs_uuid ' + bootuuid + ' root'
                with open(rgcfg, 'w') as gout:
                    for gcline in tgrubcfg:
                        gout.write(gcline)
                        gout.write('\n')
    try:
        # must fixup root@d2:/boot/efi/EFI# cat ubuntu/grub.cfg ... uuid
        subprocess.check_call(['chroot', rootdir, 'grub-mkconfig', '-o', grubcfg])
    except Exception as e:
        print(repr(e))
        print(rootdir)
        print(grubcfg)
        time.sleep(86400)
    newroot = None
    with open('/etc/shadow') as shadowin:
        shents = shadowin.read().split('\n')
    for shent in shents:
        shent = shent.split(':')
        if not shent:
            continue
        if shent[0] == 'root' and shent[1] not in ('*', '!!', ''):
            newroot = shent[1]
    if newroot:
        shlines = None
        with open(os.path.join(rootdir, 'etc/shadow')) as oshadow:
            shlines = oshadow.read().split('\n')
        with open(os.path.join(rootdir, 'etc/shadow'), 'w') as oshadow:
            for line in shlines:
                if line.startswith('root:'):
                    line = line.split(':')
                    line[1] = newroot
                    line = ':'.join(line)
                oshadow.write(line + '\n')
    partnum = None
    targblock = None
    for vol in vols:
        if vol['mount'] == '/boot/efi':
            targdev = vol['targetdisk']
            partnum = re.search('(\d+)$', targdev).group(1)
            targblock = re.search('(.*)\d+$', targdev).group(1)
    if targblock:
        shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
        shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
        subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
    #other network interfaces


def had_swap():
    with open('/etc/fstab') as tabfile:
        tabs = tabfile.read().split('\n')
        for tab in tabs:
            tab = tab.split()
            if len(tab) < 3:
                continue
            if tab[2] == 'swap':
                return True
    return False

def install_to_disk(imgpath):
    global bootuuid
    lvmvols = {}
    deftotsize = 0
    mintotsize = 0
    deflvmsize = 0
    minlvmsize = 0
    biggestsize = 0
    biggestfs = None
    plainvols = {}
    allvols = []
    swapsize = 0
    if had_swap():
        with open('/proc/meminfo') as meminfo:
            swapsize = meminfo.read().split('\n')[0]
        swapsize = int(swapsize.split()[1])
        if swapsize < 2097152:
            swapsize = swapsize * 2
        elif swapsize > 8388608 and swapsize < 67108864:
            swapsize = swapsize * 0.5
        elif swapsize >= 67108864:
            swapsize = 33554432
        swapsize = int(swapsize * 1024)
    deftotsize = swapsize
    mintotsize = swapsize
    for fs in get_image_metadata(imgpath):
        allvols.append(fs)
        deftotsize += fs['initsize']
        mintotsize += fs['minsize']
        if fs['initsize'] > biggestsize:
            biggestfs = fs
            biggestsize = fs['initsize']
        if fs['device'].startswith('/dev/mapper'):
            lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs
            deflvmsize += fs['initsize']
            minlvmsize += fs['minsize']
        else:
            plainvols[int(re.search('(\d+)$', fs['device'])[0])] = fs
    with open('/tmp/installdisk') as diskin:
        instdisk = diskin.read()
    instdisk = '/dev/' + instdisk
    parted = PartedRunner(instdisk)
    dinfo = parted.run('unit s print', check=False)
    dinfo = dinfo.split('\n')
    sectors = 0
    sectorsize = 0
    for inf in dinfo:
        if inf.startswith('Disk {0}:'.format(instdisk)):
            _, sectors = inf.split(': ')
            sectors = int(sectors.replace('s', ''))
        if inf.startswith('Sector size (logical/physical):'):
            _, sectorsize = inf.split(':')
            sectorsize = sectorsize.split('/')[0]
            sectorsize = sectorsize.replace('B', '')
            sectorsize = int(sectorsize)
    # for now, only support resizing/growing the largest partition
    minexcsize = deftotsize - biggestfs['initsize']
    mintotsize = deftotsize - biggestfs['initsize'] + biggestfs['minsize']
    minsectors = mintotsize // sectorsize
    if sectors < (minsectors + 65536):
        raise Exception('Disk too small to fit image')
    biggestsectors = sectors - (minexcsize // sectorsize)
    biggestsize = sectorsize * biggestsectors
    parted.run('mklabel gpt')
    curroffset = 2048
    for volidx in sorted(plainvols):
        vol = plainvols[volidx]
        if vol is not biggestfs:
            size = vol['initsize'] // sectorsize
        else:
            size = biggestsize // sectorsize
        size += 2047 - (size % 2048)
        end = curroffset + size
        if end > sectors:
            end = sectors
        parted.run('mkpart primary {}s {}s'.format(curroffset, end))
        vol['targetdisk'] = get_partname(instdisk, volidx)
        curroffset += size + 1
    if not lvmvols:
        if swapsize:
            swapsize = swapsize // sectorsize
            swapsize += 2047 - (size % 2048)
            end = curroffset + swapsize
            if end > sectors:
                end = sectors
            parted.run('mkpart swap {}s {}s'.format(curroffset, end))
            subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)])
    else:
        parted.run('mkpart lvm {}s 100%'.format(curroffset))
        lvmpart = get_partname(instdisk, volidx + 1)
        subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
        subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
        vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
        vginfo = vginfo.split('\n')
        pesize = 0
        pes = 0
        for infline in vginfo:
            infline = infline.split()
            if len(infline) >= 3 and infline[:2] == ['PE', 'Size']:
                pesize = int(infline[2])
            if len(infline) >= 5 and infline[:2] == ['Free', 'PE']:
                pes = int(infline[4])
        takeaway = swapsize // pesize
        for volidx in lvmvols:
            vol = lvmvols[volidx]
            if vol is biggestfs:
                continue
            takeaway += vol['initsize'] // pesize
            takeaway += 1
        biggestextents = pes - takeaway
        for volidx in lvmvols:
            vol = lvmvols[volidx]
            if vol is biggestfs:
                extents = biggestextents
            else:
                extents = vol['initsize'] // pesize
                extents += 1
            if vol['mount'] == '/':
                lvname = 'root'
            else:
                lvname = vol['mount'].replace('/', '_')
            subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage'])
            vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname)
        if swapsize:
            subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage'])
            subprocess.check_call(['mkswap', '/dev/localstorage/swap'])
    os.makedirs('/run/imginst/targ')
    for vol in allvols:
        with open(vol['targetdisk'], 'wb') as partition:
            partition.write(b'\x00' * 1 * 1024 * 1024)
        subprocess.check_call(['mkfs.{}'.format(vol['filesystem']), vol['targetdisk']])
        subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ'])
        source = vol['mount'].replace('/', '_')
        source = '/run/imginst/sources/' + source
        blankfsstat = os.statvfs('/run/imginst/targ')
        blankused = (blankfsstat.f_blocks - blankfsstat.f_bfree) * blankfsstat.f_bsize
        sys.stdout.write('\nWriting {0}: '.format(vol['mount']))
        with subprocess.Popen(['cp', '-ax', source + '/.', '/run/imginst/targ']) as copier:
            stillrunning = copier.poll()
            lastprogress = 0.0
            while stillrunning is None:
                currfsstat = os.statvfs('/run/imginst/targ')
                currused = (currfsstat.f_blocks - currfsstat.f_bfree) * currfsstat.f_bsize
                currused -= blankused
                with open('/proc/meminfo') as meminf:
                    for line in meminf.read().split('\n'):
                        if line.startswith('Dirty:'):
                            _, dirty, _ = line.split()
                            dirty = int(dirty) * 1024
                progress = (currused - dirty) / vol['minsize']
                if progress < lastprogress:
                    progress = lastprogress
                if progress > 0.99:
                    progress = 0.99
                lastprogress = progress
                progress = progress * 100
                sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
                sys.stdout.flush()
                time.sleep(0.5)
                stillrunning = copier.poll()
            if stillrunning != 0:
                raise Exception("Error copying volume")
        with subprocess.Popen(['sync']) as syncrun:
            stillrunning = syncrun.poll()
            while stillrunning is None:
                with open('/proc/meminfo') as meminf:
                    for line in meminf.read().split('\n'):
                        if line.startswith('Dirty:'):
                            _, dirty, _ = line.split()
                            dirty = int(dirty) * 1024
                progress = (vol['minsize'] - dirty) / vol['minsize']
                if progress < lastprogress:
                    progress = lastprogress
                if progress > 0.99:
                    progress = 0.99
                lastprogress = progress
                progress = progress * 100
                sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
                sys.stdout.flush()
                time.sleep(0.5)
                stillrunning = syncrun.poll()
        sys.stdout.write('\x1b[1K\rDone writing {0}'.format(vol['mount']))
        sys.stdout.write('\n')
        sys.stdout.flush()
        if vol['mount'] == '/boot':
            tbootuuid = subprocess.check_output(['blkid', vol['targetdisk']])
            if b'UUID="' in tbootuuid:
                bootuuid = tbootuuid.split(b'UUID="', 1)[1].split(b'"')[0].decode('utf8')

    subprocess.check_call(['umount', '/run/imginst/targ'])
    for vol in allvols:
        subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ/' + vol['mount']])
    fixup('/run/imginst/targ', allvols)


if __name__ == '__main__':
    install_to_disk(os.environ['mountsrc'])
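The swap sizing in install_to_disk follows a common heuristic over MemTotal (read in KiB from /proc/meminfo): under 2 GiB of RAM, swap is twice RAM; from 8 GiB to 64 GiB, half of RAM; at 64 GiB or more, a flat 32 GiB; in between (2-8 GiB) swap equals RAM. Restated as a self-contained function with the same thresholds as the code above:

    def swap_bytes(ram_kib):
        if ram_kib < 2 * 1024 * 1024:                        # < 2 GiB: twice RAM
            swap = ram_kib * 2
        elif 8 * 1024 * 1024 < ram_kib < 64 * 1024 * 1024:   # 8-64 GiB: half RAM
            swap = ram_kib * 0.5
        elif ram_kib >= 64 * 1024 * 1024:                    # >= 64 GiB: flat 32 GiB
            swap = 32 * 1024 * 1024
        else:                                                # 2-8 GiB: swap == RAM
            swap = ram_kib
        return int(swap * 1024)                              # KiB -> bytes

    assert swap_bytes(16 * 1024 * 1024) == 8 * 1024 * 1024 * 1024  # 16 GiB RAM -> 8 GiB swap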
@@ -127,11 +127,13 @@ chmod +x /sysroot/opt/confluent/bin/onboot.sh
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
cp /etc/confluent/functions /sysroot/etc/confluent/functions
+mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
+ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
+mv /lib/firmware /lib/firmware-ramfs
+ln -s /sysroot/lib/firmware /lib/firmware
if grep installtodisk /proc/cmdline > /dev/null; then
    . /etc/confluent/functions
    run_remote installimage
    exec reboot -f
fi
-mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
-ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
exec /opt/confluent/bin/start_root
@@ -0,0 +1,48 @@
#!/bin/bash
. /etc/confluent/functions
# the image will be used to deploy itself
# provide both access to image (for parsing metadata)
# and existing mounts of image (to take advantage of caching)
mount -o bind /sys /sysroot/sys
mount -o bind /dev /sysroot/dev
mount -o bind /proc /sysroot/proc
mount -o bind /run /sysroot/run


if [ ! -f /tmp/mountparts.sh ]; then
    mkdir -p /sysroot/run/imginst/sources/_
    mount -o bind /mnt/remote /sysroot/run/imginst/sources/_
else
    for srcmount in $(cat /tmp/mountparts.sh | awk '{print $2}'); do
        srcname=${srcmount#/dev/mapper/mproot}
        srcdir=$(echo $srcmount | sed -e 's!/dev/mapper/mproot!/mnt/remote!' -e 's!_!/!g')
        mkdir -p /sysroot/run/imginst/sources/$srcname
        mount -o bind $srcdir /sysroot/run/imginst/sources/$srcname
    done
fi
cd /sysroot/run
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_python getinstalldisk"
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_parts pre.d"
if [ ! -f /sysroot/tmp/installdisk ]; then
    echo 'Unable to find a suitable installation target device, ssh to port 2222 to investigate'
    while [ ! -f /sysroot/tmp/installdisk ]; do
        sleep 1
    done
fi
lvm vgchange -a n
udevadm control -e
if [ -f /sysroot/etc/lvm/devices/system.devices ]; then
    rm /sysroot/etc/lvm/devices/system.devices
fi
chroot /sysroot /usr/lib/systemd/systemd-udevd --daemon
chroot /sysroot bash -c "source /etc/confluent/functions; run_remote_python image2disk.py"
echo "Port 22" >> /etc/ssh/sshd_config
echo 'Match LocalPort 22' >> /etc/ssh/sshd_config
echo ' ChrootDirectory /sysroot/run/imginst/targ' >> /etc/ssh/sshd_config
kill -HUP $(cat /run/sshd.pid)
cat /tls/*.pem > /sysroot/run/imginst/targ/usr/local/share/ca-certificates/confluent.crt
chroot /sysroot/run/imginst/targ update-ca-certificates

chroot /sysroot/run/imginst/targ bash -c "source /etc/confluent/functions; run_remote post.sh"
chroot /sysroot bash -c "umount \$(tac /proc/mounts|awk '{print \$2}'|grep ^/run/imginst/targ)"
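The closing umount feeds /proc/mounts through tac so that nested mounts (for example /run/imginst/targ/boot/efi) are released before their parents; /proc/mounts lists mounts oldest-first, and children are always mounted after their parents. The same idea in Python (paths illustrative):

    mounts = ['/run/imginst/targ', '/run/imginst/targ/boot', '/run/imginst/targ/boot/efi']
    for mnt in reversed(mounts):    # deepest (most recent) first, parents last
        print('umount', mnt)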
@@ -0,0 +1,37 @@
#!/bin/bash

# This script is executed 'chrooted' into a cloned disk target before rebooting
#

nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-post.log
exec 2>> /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
tail -f /var/log/confluent/confluent-post.log > /dev/console &
logshowpid=$!
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
mkdir -p /opt/confluent/bin
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
systemctl enable firstboot
run_remote_python syncfileclient
run_remote_python confignet
run_remote post.custom
# post scripts may be placed into post.d, e.g. post.d/01-firstaction.sh, post.d/02-secondaction.sh
run_remote_parts post.d

# Induce execution of remote configuration, e.g. ansible plays in ansible/post.d/
run_remote_config post.d

curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus

kill $logshowpid
@ -0,0 +1,286 @@
|
||||
#!/usr/bin/python3
|
||||
import subprocess
|
||||
import importlib
|
||||
import tempfile
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import pwd
|
||||
import grp
|
||||
from importlib.machinery import SourceFileLoader
|
||||
try:
|
||||
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
|
||||
except FileNotFoundError:
|
||||
apiclient = SourceFileLoader('apiclient', '/etc/confluent/apiclient').load_module()
|
||||
|
||||
|
||||
def partitionhostsline(line):
|
||||
comment = ''
|
||||
try:
|
||||
cmdidx = line.index('#')
|
||||
comment = line[cmdidx:]
|
||||
line = line[:cmdidx].strip()
|
||||
except ValueError:
|
||||
pass
|
||||
if not line:
|
||||
return '', [], comment
|
||||
ipaddr, names = line.split(maxsplit=1)
|
||||
names = names.split()
|
||||
return ipaddr, names, comment
|
||||
|
||||
class HostMerger(object):
|
||||
def __init__(self):
|
||||
self.byip = {}
|
||||
self.byname = {}
|
||||
self.sourcelines = []
|
||||
self.targlines = []
|
||||
|
||||
def read_source(self, sourcefile):
|
||||
with open(sourcefile, 'r') as hfile:
|
||||
self.sourcelines = hfile.read().split('\n')
|
||||
while not self.sourcelines[-1]:
|
||||
self.sourcelines = self.sourcelines[:-1]
|
||||
for x in range(len(self.sourcelines)):
|
||||
line = self.sourcelines[x]
|
||||
currip, names, comment = partitionhostsline(line)
|
||||
if currip:
|
||||
self.byip[currip] = x
|
||||
for name in names:
|
||||
self.byname[name] = x
|
||||
|
||||
def read_target(self, targetfile):
|
||||
with open(targetfile, 'r') as hfile:
|
||||
lines = hfile.read().split('\n')
|
||||
if not lines[-1]:
|
||||
lines = lines[:-1]
|
||||
for y in range(len(lines)):
|
||||
line = lines[y]
|
||||
currip, names, comment = partitionhostsline(line)
|
||||
if currip in self.byip:
|
||||
x = self.byip[currip]
|
||||
if self.sourcelines[x] is None:
|
||||
# have already consumed this enntry
|
||||
                    continue
                self.targlines.append(self.sourcelines[x])
                self.sourcelines[x] = None
                continue
            for name in names:
                if name in self.byname:
                    x = self.byname[name]
                    if self.sourcelines[x] is None:
                        break
                    self.targlines.append(self.sourcelines[x])
                    self.sourcelines[x] = None
                    break
            else:
                self.targlines.append(line)

    def write_out(self, targetfile):
        while not self.targlines[-1]:
            self.targlines = self.targlines[:-1]
            if not self.targlines:
                break
        while not self.sourcelines[-1]:
            self.sourcelines = self.sourcelines[:-1]
            if not self.sourcelines:
                break
        with open(targetfile, 'w') as hosts:
            for line in self.targlines:
                hosts.write(line + '\n')
            for line in self.sourcelines:
                if line is not None:
                    hosts.write(line + '\n')
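

# Merge passwd/group data from the deployment server with the local files,
# using the UID/GID ranges from /etc/login.defs to decide which local entries
# are system accounts (kept) versus managed users (replaced), and keeping
# /etc/shadow and /etc/gshadow in step with the merged result.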
class CredMerger:
    def __init__(self):
        try:
            with open('/etc/login.defs', 'r') as ldefs:
                defs = ldefs.read().split('\n')
        except FileNotFoundError:
            defs = []
        lkup = {}
        self.discardnames = {}
        self.shadowednames = {}
        for line in defs:
            try:
                line = line[:line.index('#')]
            except ValueError:
                pass
            keyval = line.split()
            if len(keyval) < 2:
                continue
            lkup[keyval[0]] = keyval[1]
        self.uidmin = int(lkup.get('UID_MIN', 1000))
        self.uidmax = int(lkup.get('UID_MAX', 60000))
        self.gidmin = int(lkup.get('GID_MIN', 1000))
        self.gidmax = int(lkup.get('GID_MAX', 60000))
        self.shadowlines = None

    def read_passwd(self, source, targfile=False):
        self.read_generic(source, self.uidmin, self.uidmax, targfile)

    def read_group(self, source, targfile=False):
        self.read_generic(source, self.gidmin, self.gidmax, targfile)

    def read_generic(self, source, minid, maxid, targfile):
        if targfile:
            self.targdata = []
        else:
            self.sourcedata = []
        with open(source, 'r') as inputfile:
            for line in inputfile.read().split('\n'):
                try:
                    name, _, uid, _ = line.split(':', 3)
                    uid = int(uid)
                except ValueError:
                    continue
                if targfile:
                    if uid < minid or uid > maxid:
                        self.targdata.append(line)
                    else:
                        self.discardnames[name] = 1
                else:
                    if name[0] in ('+', '#', '@'):
                        self.sourcedata.append(line)
                    elif uid >= minid and uid <= maxid:
                        self.sourcedata.append(line)

    def read_shadow(self, source):
        self.shadowlines = []
        try:
            with open(source, 'r') as inshadow:
                for line in inshadow.read().split('\n'):
                    try:
                        name, _ = line.split(':', 1)
                    except ValueError:
                        continue
                    if name in self.discardnames:
                        continue
                    self.shadowednames[name] = 1
                    self.shadowlines.append(line)
        except FileNotFoundError:
            return

    def write_out(self, outfile):
        with open(outfile, 'w') as targ:
            for line in self.targdata:
                targ.write(line + '\n')
            for line in self.sourcedata:
                targ.write(line + '\n')
        if outfile == '/etc/passwd':
            if self.shadowlines is None:
                self.read_shadow('/etc/shadow')
            with open('/etc/shadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name[0] in ('+', '#', '@'):
                        continue
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!:::::::\n')
        if outfile == '/etc/group':
            if self.shadowlines is None:
                self.read_shadow('/etc/gshadow')
            with open('/etc/gshadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!::\n')
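

# Append a staged file's content to the corresponding file outside the
# staging basepath, but only if that exact content is not already present.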
def appendonce(basepath, filename):
    with open(filename, 'rb') as filehdl:
        thedata = filehdl.read()
    targname = filename.replace(basepath, '')
    try:
        with open(targname, 'rb') as filehdl:
            targdata = filehdl.read()
    except IOError:
        targdata = b''
    if thedata in targdata:
        return
    with open(targname, 'ab') as targhdl:
        targhdl.write(thedata)
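

# Fetch the synced file bundle from the deployment server, merge hosts and
# credential files into the running system, copy append-once fragments, and
# apply any requested owner/group/permission options.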
def synchronize():
    tmpdir = tempfile.mkdtemp()
    appendoncedir = tempfile.mkdtemp()
    try:
        ac = apiclient.HTTPSClient()
        myips = []
        ipaddrs = subprocess.check_output(['ip', '-br', 'a']).split(b'\n')
        for line in ipaddrs:
            isa = line.split()
            if len(isa) < 3 or isa[1] != b'UP':
                continue
            for addr in isa[2:]:
                if addr.startswith(b'fe80::') or addr.startswith(b'169.254'):
                    continue
                addr = addr.split(b'/')[0]
                if not isinstance(addr, str):
                    addr = addr.decode('utf8')
                myips.append(addr)
        data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
        status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
        if status == 202:
            lastrsp = ''
            while status != 204:
                status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
                if not isinstance(rsp, str):
                    rsp = rsp.decode('utf8')
                if status == 200:
                    lastrsp = rsp
        pendpasswd = os.path.join(tmpdir, 'etc/passwd')
        if os.path.exists(pendpasswd):
            cm = CredMerger()
            cm.read_passwd(pendpasswd, targfile=False)
            cm.read_passwd('/etc/passwd', targfile=True)
            cm.write_out('/etc/passwd')
        pendgroup = os.path.join(tmpdir, 'etc/group')
        if os.path.exists(pendgroup):
            cm = CredMerger()
            cm.read_group(pendgroup, targfile=False)
            cm.read_group('/etc/group', targfile=True)
            cm.write_out('/etc/group')
        pendhosts = os.path.join(tmpdir, 'etc/hosts')
        if os.path.exists(pendhosts):
            cm = HostMerger()
            cm.read_source(pendhosts)
            cm.read_target('/etc/hosts')
            cm.write_out('/etc/hosts')
        for dirn in os.walk(appendoncedir):
            for filen in dirn[2]:
                appendonce(appendoncedir, os.path.join(dirn[0], filen))
        if lastrsp:
            lastrsp = json.loads(lastrsp)
            opts = lastrsp.get('options', {})
            for fname in opts:
                uid = -1
                gid = -1
                for opt in opts[fname]:
                    if opt == 'owner':
                        try:
                            uid = pwd.getpwnam(opts[fname][opt]['name']).pw_uid
                        except KeyError:
                            uid = opts[fname][opt]['id']
                    elif opt == 'group':
                        try:
                            gid = grp.getgrnam(opts[fname][opt]['name']).gr_gid
                        except KeyError:
                            gid = opts[fname][opt]['id']
                    elif opt == 'permissions':
                        os.chmod(fname, int(opts[fname][opt], 8))
                if uid != -1 or gid != -1:
                    os.chown(fname, uid, gid)
    finally:
        shutil.rmtree(tmpdir)
        shutil.rmtree(appendoncedir)


if __name__ == '__main__':
    synchronize()
@ -9,33 +9,49 @@ MGR=$(grep ^EXTMGRINFO: /custom-installation/confluent/confluent.info |awk -F'|'
MGR=$(grep ^MANAGER: /custom-installation/confluent/confluent.info|head -n 1| awk '{print $2}')
MGTIFACE=$(grep $MGR /custom-installation/confluent/confluent.info | grep ^EXTMGRINFO: | head -n 1 | awk -F'|' '{print $2}')
oum=$(umask)
umask 077
chroot . custom-installation/confluent/bin/clortho $NODENAME $MGR > /root/custom-installation/confluent/confluent.apikey
MGR=[$MGR]
deploycfg=/root/custom-installation/confluent/confluent.deploycfg
netcfgfile=$deploycfg
umask 077
if [ -e /tmp/cnflnthmackeytmp ]; then
    netcfgfile=/tmp/idnttmp
    hmackeyfile=/tmp/cnflnthmackeytmp
    #echo -n $(grep ^apitoken: /tmp/identdata/cnflnt.yml|awk '{print $2}') > $hmackeyfile
    passfile=/tmp/cnflnttmppassfile
    passcrypt=/tmp/cnflntcryptfile
    hmacfile=/tmp/cnflnthmacfile
    chroot . ln -sf /custom-installation/confluent/bin/clortho custom-installation/confluent/bin/genpasshmac
    cp $hmackeyfile tmp
    chroot . custom-installation/confluent/bin/genpasshmac $passfile $passcrypt $hmacfile $hmackeyfile
    chroot . curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_CRYPTHMAC: $(cat /root/$hmacfile)" -d @/tmp/cnflntcryptfile https://$MGR/confluent-api/self/registerapikey
    cp /root/$passfile /root/custom-installation/confluent/confluent.apikey
    DEVICE=$(cat /tmp/autodetectnic)
else
    chroot . custom-installation/confluent/bin/clortho $NODENAME $MGR > /root/custom-installation/confluent/confluent.apikey
    MGR=[$MGR]
    nic=$(grep ^MANAGER /custom-installation/confluent/confluent.info|grep fe80::|sed -e s/.*%//|head -n 1)
    nic=$(ip link |grep ^$nic:|awk '{print $2}')
    DEVICE=${nic%:}
fi
if [ -z "$MGTIFACE" ]; then
    chroot . usr/bin/curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_APIKEY: $(cat /root//custom-installation/confluent/confluent.apikey)" https://${MGR}/confluent-api/self/deploycfg > $deploycfg
else
    chroot . usr/bin/curl -f -H "CONFLUENT_MGTIFACE: $MGTIFACE" -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_APIKEY: $(cat /root//custom-installation/confluent/confluent.apikey)" https://${MGR}/confluent-api/self/deploycfg > $deploycfg
fi
umask $oum
nic=$(grep ^MANAGER /custom-installation/confluent/confluent.info|grep fe80::|sed -e s/.*%//|head -n 1)
nic=$(ip link |grep ^$nic:|awk '{print $2}')
DEVICE=${nic%:}
ipv4m=$(grep ^ipv4_method $deploycfg|awk '{print$2}')
ipv4m=$(grep ^ipv4_method $netcfgfile|awk '{print$2}')
. /scripts/functions
if [ "$ipv4m" = "dhcp" ]; then
    IP=dhcp
    configure_networking
elif [ "$ipv4m" = "static" ]; then
    v4addr=$(grep ^ipv4_address: $deploycfg)
    v4addr=$(grep ^ipv4_address: $netcfgfile| sed -e 's!/.*!!')
    v4addr=${v4addr#ipv4_address: }
    v4gw=$(grep ^ipv4_gateway: $deploycfg)
    v4gw=$(grep ^ipv4_gateway: $netcfgfile)
    v4gw=${v4gw#ipv4_gateway: }
    if [ "$v4gw" = "null" ]; then
        v4gw=""
    fi
    v4nm=$(grep ipv4_netmask: $deploycfg)
    v4nm=$(grep ipv4_netmask: $netcfgfile)
    v4nm=${v4nm#ipv4_netmask: }
    dnsdomain=$(grep ^dnsdomain: $deploycfg)
    dnsdomain=${dnsdomain#dnsdomain: }
@ -6,7 +6,7 @@ mkdir -p /custom-installation
cp -a /opt/confluent /custom-installation
touch /custom-installation/confluent/confluent.info
TRIES=5
while [ ! -e /dev/disk ] && [ $TRIES -gt 0 ]; do
while [ ! -e /dev/disk/by-label ] && [ $TRIES -gt 0 ]; do
    sleep 2
    TRIES=$((TRIES - 1))
done
@ -74,8 +74,11 @@ if [ -e /sys/firmware/efi ]; then
fi
fi
cat /target/etc/confluent/tls/*.pem > /target/etc/confluent/ca.pem
cat /target/etc/confluent/tls/*.pem > /target/usr/local/share/ca-certificates/confluent.crt
cat /target/etc/confluent/tls/*.pem > /etc/confluent/ca.pem
chroot /target update-ca-certificates
chroot /target bash -c "source /etc/confluent/functions; run_remote_python syncfileclient"
chroot /target bash -c "source /etc/confluent/functions; run_remote_python confignet"
chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d"
source /target/etc/confluent/functions
confluent_osdeploy/ubuntu22.04-diskless (symbolic link)
@ -0,0 +1 @@
ubuntu20.04-diskless
@ -6,7 +6,7 @@ mkdir -p /custom-installation
cp -a /opt/confluent /custom-installation
touch /custom-installation/confluent/confluent.info
TRIES=5
while [ ! -e /dev/disk ] && [ $TRIES -gt 0 ]; do
while [ ! -e /dev/disk/by-label ] && [ $TRIES -gt 0 ]; do
    sleep 2
    TRIES=$((TRIES - 1))
done
@ -0,0 +1,29 @@
Ansible playbooks ending in .yml or .yaml that are placed into this directory will be executed at the
appropriate phase of the install process.

Alternatively, plays may be placed in /var/lib/confluent/private/os/<profilename>/ansible/<directory>.
This prevents public clients from being able to read the plays, which is not necessary for them to function,
and may protect them from divulging material contained in the plays or associated roles.

The 'hosts' field may be omitted; if included, it will be ignored and replaced with the host that is
specifically requesting the playbooks be executed.

Also, the playbooks will be executed on the deployment server, so this may be slower in aggregate than
running content under scripts/, which asks much less of the deployment server.

Here is an example of what a playbook would look like broadly:

- name: Example
  gather_facts: no
  tasks:
    - name: Example1
      lineinfile:
        path: /etc/hosts
        line: 1.2.3.4 test1
        create: yes
    - name: Example2
      lineinfile:
        path: /etc/hosts
        line: 1.2.3.5 test2
        create: yes
@ -0,0 +1,29 @@
Ansible playbooks ending in .yml or .yaml that are placed into this directory will be executed at the
appropriate phase of the install process.

Alternatively, plays may be placed in /var/lib/confluent/private/os/<profilename>/ansible/<directory>.
This prevents public clients from being able to read the plays, which is not necessary for them to function,
and may protect them from divulging material contained in the plays or associated roles.

The 'hosts' field may be omitted; if included, it will be ignored and replaced with the host that is
specifically requesting the playbooks be executed.

Also, the playbooks will be executed on the deployment server, so this may be slower in aggregate than
running content under scripts/, which asks much less of the deployment server.

Here is an example of what a playbook would look like broadly:

- name: Example
  gather_facts: no
  tasks:
    - name: Example1
      lineinfile:
        path: /etc/hosts
        line: 1.2.3.4 test1
        create: yes
    - name: Example2
      lineinfile:
        path: /etc/hosts
        line: 1.2.3.5 test2
        create: yes
@ -2,7 +2,10 @@
echo "Confluent first boot is running"
HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
(
exec >> /target/var/log/confluent/confluent-firstboot.log
exec 2>> /target/var/log/confluent/confluent-firstboot.log
chmod 600 /target/var/log/confluent/confluent-firstboot.log
cp -a /etc/confluent/ssh/* /etc/ssh/
systemctl restart sshd
rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}')
@ -18,7 +21,10 @@ done
hostnamectl set-hostname $(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
touch /etc/cloud/cloud-init.disabled
source /etc/confluent/functions

confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export confluent_mgr confluent_profile
run_remote_parts firstboot.d
run_remote_config firstboot.d
curl --capath /etc/confluent/tls -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console
@ -8,7 +8,6 @@ chmod go-rwx /etc/confluent/*
for i in /custom-installation/ssh/*.ca; do
    echo '@cert-authority *' $(cat $i) >> /target/etc/ssh/ssh_known_hosts
done

cp -a /etc/ssh/ssh_host* /target/etc/confluent/ssh/
cp -a /etc/ssh/sshd_config.d/confluent.conf /target/etc/confluent/ssh/sshd_config.d/
sshconf=/target/etc/ssh/ssh_config
@ -19,10 +18,15 @@ echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf

cp /etc/confluent/functions /target/etc/confluent/functions
source /etc/confluent/functions
mkdir -p /target/var/log/confluent
cp /var/log/confluent/* /target/var/log/confluent/
(
exec >> /target/var/log/confluent/confluent-post.log
exec 2>> /target/var/log/confluent/confluent-post.log
chmod 600 /target/var/log/confluent/confluent-post.log
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /target/etc/confluent/firstboot.sh
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /target/etc/confluent/functions
source /target/etc/confluent/functions
chmod +x /target/etc/confluent/firstboot.sh
cp /tmp/allnodes /target/root/.shosts
cp /tmp/allnodes /target/etc/ssh/shosts.equiv
@ -74,12 +78,17 @@ if [ -e /sys/firmware/efi ]; then
fi
fi
cat /target/etc/confluent/tls/*.pem > /target/etc/confluent/ca.pem
cat /target/etc/confluent/tls/*.pem > /target/usr/local/share/ca-certificates/confluent.crt
cat /target/etc/confluent/tls/*.pem > /etc/confluent/ca.pem
chroot /target update-ca-certificates
chroot /target bash -c "source /etc/confluent/functions; run_remote_python syncfileclient"
chroot /target bash -c "source /etc/confluent/functions; run_remote_python confignet"
chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d"
source /target/etc/confluent/functions

run_remote_config post
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'

umount /target/sys /target/dev /target/proc

) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console
confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.d/.gitignore (new empty file)
@ -1,5 +1,16 @@
#!/bin/bash
deploycfg=/custom-installation/confluent/confluent.deploycfg
mkdir -p /var/log/confluent
mkdir -p /opt/confluent/bin
mkdir -p /etc/confluent
cp /custom-installation/confluent/confluent.info /custom-installation/confluent/confluent.apikey /etc/confluent/
cat /custom-installation/tls/*.pem >> /etc/confluent/ca.pem
cp /custom-installation/confluent/bin/apiclient /opt/confluent/bin
cp $deploycfg /etc/confluent/
(
exec >> /var/log/confluent/confluent-pre.log
exec 2>> /var/log/confluent/confluent-pre.log
chmod 600 /var/log/confluent/confluent-pre.log

cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //')
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
@ -23,7 +34,17 @@ echo HostbasedAuthentication yes >> /etc/ssh/sshd_config.d/confluent.conf
echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/confluent.conf
echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/confluent.conf
systemctl restart sshd
mkdir -p /etc/confluent
export confluent_profile confluent_mgr
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
. /etc/confluent/functions
run_remote_parts pre.d
curl -f -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $apikey" https://$confluent_mgr/confluent-api/self/nodelist > /tmp/allnodes
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk
python3 /custom-installation/getinstalldisk
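# Retry disk detection once if the first pass did not produce /tmp/installdisk.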
if [ ! -e /tmp/installdisk ]; then
    curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk
    python3 /custom-installation/getinstalldisk
fi
sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml
) &
tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console
@ -36,7 +36,7 @@ if [ "$OPKGNAME" = "confluent-server" ]; then
if grep wheezy /etc/os-release; then
    sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl, python-msgpack/' debian/control
else
    sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko/' debian/control
    sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd/' debian/control
fi
if grep wheezy /etc/os-release; then
    echo 'confluent_client python-confluent-client' >> debian/pydist-overrides
@ -49,6 +49,13 @@ if ! grep wheezy /etc/os-release; then
fi
head -n -1 debian/control > debian/control1
mv debian/control1 debian/control
cat > debian/postinst << EOF
if ! getent passwd confluent > /dev/null; then
    useradd -r confluent -d /var/lib/confluent -s /usr/sbin/nologin
    mkdir -p /etc/confluent
    chown confluent /etc/confluent
fi
EOF
echo 'export PYBUILD_INSTALL_ARGS=--install-lib=/opt/confluent/lib/python' >> debian/rules
#echo 'Provides: python-'$DPKGNAME >> debian/control
#echo 'Conflicts: python-'$DPKGNAME >> debian/control
@ -259,6 +259,9 @@ def get_myname():
    mycachedname[1] = time.time()
    return myname

def in_collective():
    return bool(list(cfm.list_collective()))

def handle_connection(connection, cert, request, local=False):
    global currentleader
    global retrythread
@ -713,6 +716,7 @@ def become_leader(connection):
    if reassimilate is not None:
        reassimilate.kill()
    reassimilate = eventlet.spawn(reassimilate_missing)
    cfm._ready = True
    if _assimilate_missing(skipaddr):
        schedule_rebalance()
@ -371,7 +371,7 @@ node = {
                        'the managed node. If not specified, then console '
                        'is disabled. "ipmi" should be specified for most '
                        'systems if console is desired.'),
        'validvalues': ('ssh', 'ipmi', 'tsmsol'),
        'validvalues': ('ssh', 'ipmi', 'openbmc', 'tsmsol'),
    },
    # 'virtualization.host': {
    #     'description': ('Hypervisor where this node does/should reside'),
@ -119,6 +119,7 @@ _cfgstore = None
_pendingchangesets = {}
_txcount = 0
_hasquorum = True
_ready = False

_attraliases = {
    'bmc': 'hardwaremanagement.manager',
@ -830,6 +831,9 @@ _oldcfgstore = None
_oldtxcount = 0

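
# Report whether the configuration store is usable; it is held False while a
# collective join or a configuration restore is rewriting the store.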
def config_is_ready():
    return _ready

def rollback_clear():
    global _cfgstore
    global _txcount
@ -847,6 +851,8 @@ def clear_configuration():
    global _txcount
    global _oldcfgstore
    global _oldtxcount
    global _ready
    _ready = False
    stop_leading()
    stop_following()
    _oldcfgstore = _cfgstore
@ -857,6 +863,7 @@ def clear_configuration():
def commit_clear():
    global _oldtxcount
    global _oldcfgstore
    global _ready
    # first, copy over old non-key globals, as those are
    # currently defined as local to each collective member
    # currently just 'autosense' which is intended to be active
@ -876,6 +883,7 @@ def commit_clear():
        pass
    ConfigManager.wait_for_sync(True)
    ConfigManager._bg_sync_to_file()
    _ready = True

cfgleader = None
@ -1273,6 +1281,7 @@ class ConfigManager(object):
    def __init__(self, tenant, decrypt=False, username=None):
        self.clientfiles = {}
        global _cfgstore
        self.inrestore = False
        with _initlock:
            if _cfgstore is None:
                init()
@ -2089,6 +2098,10 @@ class ConfigManager(object):
    def _notif_attribwatchers(self, nodeattrs):
        if self.tenant not in self._attribwatchers:
            return
        if self.inrestore:
            # Do not stir up attribute watchers during a collective join or DB restore,
            # it's too hectic of a time to react
            return
        notifdata = {}
        attribwatchers = self._attribwatchers[self.tenant]
        for node in nodeattrs:
@ -2471,6 +2484,13 @@ class ConfigManager(object):
        #TODO: wait for synchronization to succeed/fail??)

    def _load_from_json(self, jsondata, sync=True):
        self.inrestore = True
        try:
            self._load_from_json_backend(jsondata, sync=True)
        finally:
            self.inrestore = False

    def _load_from_json_backend(self, jsondata, sync=True):
        """Load fresh configuration data from jsondata

        :param jsondata: String of jsondata
@ -2939,9 +2959,9 @@ def get_globals():
        bkupglobals[globvar] = _cfgstore['globals'][globvar]
    return bkupglobals


def init(stateless=False):
    global _cfgstore
    global _ready
    if stateless:
        _cfgstore = {}
        return
@ -2949,6 +2969,9 @@ def init(stateless=False):
        ConfigManager._read_from_path()
    except IOError:
        _cfgstore = {}
    members = list(list_collective())
    if len(members) < 2:
        _ready = True


if __name__ == '__main__':
@ -447,6 +447,7 @@ def _init_core():
            },
        },
    },
    'layout': PluginRoute({'handler': 'layout'}),
    'media': {
        'uploads': PluginCollection({
            'pluginattrs': ['hardwaremanagement.method'],
@ -648,6 +648,8 @@ def detected_models():


def _recheck_nodes(nodeattribs, configmanager):
    if not cfm.config_is_ready():
        return
    if rechecklock.locked():
        # if already in progress, don't run again
        # it may make sense to schedule a repeat, but will try the easier and less redundant way first
@ -766,6 +768,9 @@ def eval_detected(info):
def detected(info):
    global rechecker
    global rechecktime
    if not cfm.config_is_ready():
        # drop processing of discovery data while configmanager is 'down'
        return
    # later, manual and CMM discovery may act on SN and/or UUID
    for service in info['services']:
        if service in nodehandlers:
@ -1429,7 +1434,12 @@ def discover_node(cfg, handler, info, nodename, manual):
        newnodeattribs['pubkeys.tls_hardwaremanager'] = \
            util.get_fingerprint(handler.https_cert, 'sha256')
    if newnodeattribs:
        cfg.set_node_attributes({nodename: newnodeattribs})
        currattrs = cfg.get_node_attributes(nodename, newnodeattribs)
        for checkattr in newnodeattribs:
            checkval = currattrs.get(nodename, {}).get(checkattr, {}).get('value', None)
            if checkval != newnodeattribs[checkattr]:
                cfg.set_node_attributes({nodename: newnodeattribs})
                break
    log.log({'info': 'Discovered {0} ({1})'.format(nodename,
                                                   handler.devname)})
    if nodeconfig:
@ -1508,7 +1518,12 @@ def do_pxe_discovery(cfg, handler, info, manual, nodename, policies):
        if info['hwaddr'] != oldhwaddr:
            attribs[newattrname] = info['hwaddr']
    if attribs:
        cfg.set_node_attributes({nodename: attribs})
        currattrs = cfg.get_node_attributes(nodename, attribs)
        for checkattr in attribs:
            checkval = currattrs.get(nodename, {}).get(checkattr, {}).get('value', None)
            if checkval != attribs[checkattr]:
                cfg.set_node_attributes({nodename: attribs})
                break
    if info['uuid'] in known_pxe_uuids:
        return True
    if uuid_is_valid(info['uuid']):
@ -1597,7 +1612,10 @@ def remotescan():
    mycfm = cfm.ConfigManager(None)
    myname = collective.get_myname()
    for remagent in get_subscriptions():
        affluent.renotify_me(remagent, mycfm, myname)
        try:
            affluent.renotify_me(remagent, mycfm, myname)
        except Exception as e:
            log.log({'error': 'Unexpected problem asking {} for discovery notifications'.format(remagent)})


def blocking_scan():
@ -1637,7 +1655,7 @@ def start_autosense():
    autosensors.add(eventlet.spawn(slp.snoop, safe_detected, slp))
    #autosensors.add(eventlet.spawn(mdns.snoop, safe_detected, mdns))
    autosensors.add(eventlet.spawn(pxe.snoop, safe_detected, pxe, get_node_guess_by_uuid))
    remotescan()
    eventlet.spawn(remotescan)


nodes_by_fprint = {}
@ -326,7 +326,7 @@ def run(args):
            break
        except Exception:
            eventlet.sleep(0.5)
    disco.start_detection()
    eventlet.spawn_n(disco.start_detection)
    eventlet.sleep(1)
    consoleserver.start_console_sessions()
    while 1:
@ -92,6 +92,7 @@ def msg_deserialize(packed):
            return cls(*m[1:])
    raise Exception("Unknown shenanigans")


class ConfluentMessage(object):
    apicode = 200
    readonly = False
@ -254,6 +255,21 @@ class ConfluentNodeError(object):
        raise Exception('{0}: {1}'.format(self.node, self.error))

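
# Wrap an arbitrary JSON-serializable payload as a message, for handlers
# (such as the layout info plugin) that return free-form structures.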
class Generic(ConfluentMessage):

    def __init__(self, data):
        self.data = data

    def json(self):
        return json.dumps(self.data)

    def raw(self):
        return self.data

    def html(self):
        return json.dumps(self.data)


class ConfluentResourceUnavailable(ConfluentNodeError):
    apicode = 503
@ -332,6 +332,7 @@ def get_full_net_config(configmanager, node, serverip=None):
    if serverip:
        myaddrs = get_addresses_by_serverip(serverip)
    nm = NetManager(myaddrs, node, configmanager)
    defaultnic = {}
    if None in attribs:
        nm.process_attribs(None, attribs[None])
        del attribs[None]
@ -342,9 +343,44 @@ def get_full_net_config(configmanager, node, serverip=None):
        retattrs['default'] = nm.myattribs[None]
        add_netmask(retattrs['default'])
        del nm.myattribs[None]
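    # No explicit default net attributes: fall back to the single-NIC guess
    # from get_nic_config to synthesize a default entry.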
    else:
        nnc = get_nic_config(configmanager, node, serverip=serverip)
        if nnc.get('ipv4_address', None):
            defaultnic['ipv4_address'] = '{}/{}'.format(nnc['ipv4_address'], nnc['prefix'])
        if nnc.get('ipv4_gateway', None):
            defaultnic['ipv4_gateway'] = nnc['ipv4_gateway']
        if nnc.get('ipv4_method', None):
            defaultnic['ipv4_method'] = nnc['ipv4_method']
        if nnc.get('ipv6_address', None):
            defaultnic['ipv6_address'] = '{}/{}'.format(nnc['ipv6_address'], nnc['ipv6_prefix'])
        if nnc.get('ipv6_method', None):
            defaultnic['ipv6_method'] = nnc['ipv6_method']
    retattrs['extranets'] = nm.myattribs
    for attri in retattrs['extranets']:
        add_netmask(retattrs['extranets'][attri])
        if retattrs['extranets'][attri].get('ipv4_address', None) == defaultnic.get('ipv4_address', 'NOPE'):
            defaultnic = {}
        if retattrs['extranets'][attri].get('ipv6_address', None) == defaultnic.get('ipv6_address', 'NOPE'):
            defaultnic = {}
    if defaultnic:
        retattrs['default'] = defaultnic
        add_netmask(retattrs['default'])
        ipv4addr = defaultnic.get('ipv4_address', None)
        if ipv4addr and '/' in ipv4addr:
            ipv4bytes = socket.inet_pton(socket.AF_INET, ipv4addr.split('/')[0])
            for addr in nm.myaddrs:
                if addr[0] != socket.AF_INET:
                    continue
                if ipn_on_same_subnet(addr[0], addr[1], ipv4bytes, addr[2]):
                    defaultnic['current_nic'] = True
        ipv6addr = defaultnic.get('ipv6_address', None)
        if ipv6addr and '/' in ipv6addr:
            ipv6bytes = socket.inet_pton(socket.AF_INET6, ipv6addr.split('/')[0])
            for addr in nm.myaddrs:
                if addr[0] != socket.AF_INET6:
                    continue
                if ipn_on_same_subnet(addr[0], addr[1], ipv6bytes, addr[2]):
                    defaultnic['current_nic'] = True
    return retattrs
@ -49,10 +49,11 @@ import eventlet.green.select as select

import eventlet.green.socket as socket


import confluent.collective.manager as collective
import confluent.exceptions as exc
import confluent.log as log
import confluent.messages as msg
import confluent.noderange as noderange
import confluent.util as util
from eventlet.greenpool import GreenPool
import eventlet.green.subprocess as subprocess
@ -502,10 +503,21 @@ def _full_updatemacmap(configmanager):
            'Network topology not available to tenants')
    # here's a list of switches... need to add nodes that are switches
    nodelocations = configmanager.get_node_attributes(
        configmanager.list_nodes(), ('type', 'net*.switch', 'net*.switchport'))
        configmanager.list_nodes(), ('type', 'collective.managercandidates', 'net*.switch', 'net*.switchport'))
    switches = set([])
    incollective = collective.in_collective()
    if incollective:
        mycollectivename = collective.get_myname()
    for node in nodelocations:
        cfg = nodelocations[node]
        if incollective:
            candmgrs = cfg.get('collective.managercandidates', {}).get('value', None)
            if candmgrs:
                candmgrs = noderange.NodeRange(candmgrs, configmanager).nodes
                if mycollectivename not in candmgrs:
                    # do not think about trying to find nodes that we aren't possibly
                    # supposed to be a manager for in a collective
                    continue
        if cfg.get('type', {}).get('value', None) == 'switch':
            switches.add(node)
        for attr in cfg:
@ -2,6 +2,7 @@
import eventlet
import eventlet.green.select as select
import eventlet.green.subprocess as subprocess
from fnmatch import fnmatch
import glob
import logging
logging.getLogger('libarchive').addHandler(logging.NullHandler())
@ -153,6 +154,14 @@ def update_boot_esxi(profiledir, profile, label):
        '{0}/boot.img'.format(profiledir), profname], preexec_fn=relax_umask)


def find_glob(loc, fileglob):
    for cdir, _, fs in os.walk(loc):
        for f in fs:
            if fnmatch(f, fileglob):
                return os.path.join(cdir, f)
    return None


def update_boot_linux(profiledir, profile, label):
    profname = os.path.basename(profiledir)
    kernelargs = profile.get('kernelargs', '')
@ -170,7 +179,11 @@ def update_boot_linux(profiledir, profile, label):
    for initramfs in initrds:
        grubcfg += " /initramfs/{0}".format(initramfs)
    grubcfg += "\n}\n"
    with open(profiledir + '/boot/efi/boot/grub.cfg', 'w') as grubout:
    # we'll need to honor the grub prefix path if it differs
    grubcfgpath = find_glob(profiledir + '/boot', 'grub.cfg')
    if not grubcfgpath:
        grubcfgpath = profiledir + '/boot/efi/boot/grub.cfg'
    with open(grubcfgpath, 'w') as grubout:
        grubout.write(grubcfg)
    ipxeargs = kernelargs
    for initramfs in initrds:
confluent_server/confluent/plugins/console/openbmc.py (new file, 160 lines)
@ -0,0 +1,160 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2015-2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# This plugin provides an ssh implementation conforming to the 'console'
# specification. consoleserver or shellserver would be equally likely
# to use this.

import confluent.exceptions as cexc
import confluent.interface.console as conapi
import confluent.log as log
import confluent.util as util
import pyghmi.exceptions as pygexc
import pyghmi.redfish.command as rcmd
import pyghmi.util.webclient as webclient
import eventlet
import eventlet.green.ssl as ssl
try:
    websocket = eventlet.import_patched('websocket')
    wso = websocket.WebSocket
except Exception:
    wso = object

def get_conn_params(node, configdata):
    if 'secret.hardwaremanagementuser' in configdata:
        username = configdata['secret.hardwaremanagementuser']['value']
    else:
        username = 'USERID'
    if 'secret.hardwaremanagementpassword' in configdata:
        passphrase = configdata['secret.hardwaremanagementpassword']['value']
    else:
        passphrase = 'PASSW0RD'  # for lack of a better guess
    if 'hardwaremanagement.manager' in configdata:
        bmc = configdata['hardwaremanagement.manager']['value']
    else:
        bmc = node
    bmc = bmc.split('/', 1)[0]
    return {
        'username': username,
        'passphrase': passphrase,
        'bmc': bmc,
    }
_configattributes = ('secret.hardwaremanagementuser',
                     'secret.hardwaremanagementpassword',
                     'hardwaremanagement.manager')
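
# Wrap the websocket client so the TLS session is checked against the
# certificate confluent has pinned for this BMC, rather than a CA bundle.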
class WrappedWebSocket(wso):

    def set_verify_callback(self, callback):
        self._certverify = callback

    def connect(self, url, **options):
        add_tls = url.startswith('wss://')
        if add_tls:
            hostname, port, resource, _ = websocket._url.parse_url(url)
            if hostname[0] != '[' and ':' in hostname:
                hostname = '[{0}]'.format(hostname)
            if resource[0] != '/':
                resource = '/{0}'.format(resource)
            url = 'ws://{0}:443{1}'.format(hostname, resource)
        else:
            return super(WrappedWebSocket, self).connect(url, **options)
        self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
        self.sock, addrs = websocket._http.connect(url, self.sock_opt, websocket._http.proxy_info(**options),
                                                   options.pop('socket', None))
        self.sock = ssl.wrap_socket(self.sock, cert_reqs=ssl.CERT_NONE)
        # The above is superseded by the _certverify, which provides
        # known-hosts style cert validation
        bincert = self.sock.getpeercert(binary_form=True)
        if not self._certverify(bincert):
            raise pygexc.UnrecognizedCertificate('Unknown certificate', bincert)
        try:
            self.handshake_response = websocket._handshake.handshake(self.sock, *addrs, **options)
            if self.handshake_response.status in websocket._handshake.SUPPORTED_REDIRECT_STATUSES:
                options['redirect_limit'] = options.pop('redirect_limit', 3) - 1
                if options['redirect_limit'] < 0:
                    raise Exception('Redirect limit hit')
                url = self.handshake_response.headers['location']
                self.sock.close()
                return self.connect(url, **options)
            self.connected = True
        except:
            if self.sock:
                self.sock.close()
            self.sock = None
            raise


class TsmConsole(conapi.Console):

    def __init__(self, node, config):
        self.node = node
        self.ws = None
        configdata = config.get_node_attributes([node], _configattributes, decrypt=True)
        connparams = get_conn_params(node, configdata[node])
        self.username = connparams['username']
        self.password = connparams['passphrase']
        self.bmc = connparams['bmc']
        self.origbmc = connparams['bmc']
        if ':' in self.bmc:
            self.bmc = '[{0}]'.format(self.bmc)
        self.datacallback = None
        self.nodeconfig = config
        self.connected = False

    def recvdata(self):
        while self.connected:
            pendingdata = self.ws.recv()
            if pendingdata == '':
                self.datacallback(conapi.ConsoleEvent.Disconnect)
                return
            self.datacallback(pendingdata)

    def connect(self, callback):
        self.datacallback = callback
        kv = util.TLSCertVerifier(
            self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert
        wc = webclient.SecureHTTPConnection(self.origbmc, 443, verifycallback=kv)
        rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json'})
        bmc = self.bmc
        if '%' in self.bmc:
            prefix = self.bmc.split('%')[0]
            bmc = prefix + ']'
        self.ws = WrappedWebSocket(host=bmc)
        self.ws.set_verify_callback(kv)
        self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']))
        self.connected = True
        eventlet.spawn_n(self.recvdata)
        return

    def write(self, data):
        self.ws.send(data)

    def close(self):
        if self.ws:
            self.ws.close()
        self.connected = False
        self.datacallback = None

def create(nodes, element, configmanager, inputdata):
    if len(nodes) == 1:
        return TsmConsole(nodes[0], configmanager)
@ -210,10 +210,12 @@ def xml2stateinfo(statdata):
    stateinfo = []
    sensornames = sorted([x.tag for x in statdata])
    themodel = None
    for model in sensorsbymodel:
        if sensorsbymodel[model] == sensornames:
    for model in sorted(sensorsbymodel):
        if all([x in sensornames for x in sensorsbymodel[model]]):
            themodel = model
            break
    else:
        print(repr(sensornames))
    thesensors = _thesensors[themodel]
    #['mode', 't1', 't2a', 't2b', 't2c', 't2', 't5', 't3', 't4', 'dw', 't3', 'rh', 'setpoint', 'secflow', 'primflow', 'ps1', 'ps1a', 'ps1b', 'ps2', 'ps3', 'ps4', 'ps5a', 'ps5b', 'ps5c', 'sdp', 'valve', 'valve2', 'pumpspeed1', 'pumpspeed2', 'pumpspeed3', 'alarms', 'dt', 'p3state', 'duty']
    for tagname in thesensors:
@ -25,6 +25,10 @@ import hashlib
import json
import time

def simplify_name(name):
    return name.lower().replace(' ', '_').replace('/', '-').replace(
        '_-_', '-')

#eaton uses 'eval' rather than json, massage it to be valid json
def sanitize_json(data):
    if not isinstance(data, str):
@ -131,6 +135,43 @@ class WebConnection(wc.SecureHTTPConnection):
        body = rsp.read()
        return body, rsp.status

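# Cache sensor readings per node for one second, so that enumerating sensor
# names and then reading each value does not issue a burst of PDU requests.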
_sensors_by_node = {}
def get_sensor_data(element, node, configmanager):
    category, name = element[-2:]
    justnames = False
    readings = []
    if len(element) == 3:
        # just get names
        category = name
        name = 'all'
        justnames = True
    if category in ('leds', 'fans', 'temperature'):
        return
    sn = _sensors_by_node.get(node, None)
    if not sn or sn[1] < time.time():
        gc = PDUClient(node, configmanager)
        try:
            sdata = gc.get_sensor_data()
        finally:
            gc.logout()
        _sensors_by_node[node] = [sdata, time.time() + 1]
        sn = _sensors_by_node.get(node, None)
    for outlet in sn[0]:
        for sensename in sn[0][outlet]:
            myname = 'Outlet {0} {1}'.format(outlet, sensename)
            measurement = sn[0][outlet][sensename]
            if name == 'all' or simplify_name(myname) == name:
                readings.append({
                    'name': myname,
                    'value': float(measurement['value']),
                    'units': measurement['units'],
                    'type': measurement['type'],
                })
    if justnames:
        for reading in readings:
            yield msg.ChildCollection(simplify_name(reading['name']))
    else:
        yield msg.SensorReadings(readings, name=node)


class PDUClient(object):
@ -231,6 +272,28 @@ class PDUClient(object):
            if outdata[0] == outlet:
                return 'on' if outdata[3] else 'off'
        return

    def get_sensor_data(self):
        rsp = self.do_request('cgi_pdu_outlets')
        data = sanitize_json(rsp[0])
        data = json.loads(data)
        data = data['data'][0]
        sdata = {}
        for outdata in data:
            outsense = {}
            outletname = outdata[0][0]
            outsense['Energy'] = {
                'value': float(outdata[11] / 1000),
                'units': 'kwh',
                'type': 'Energy'
            }
            outsense['Power'] = {
                'value': float(outdata[4]),
                'units': 'w',
                'type': 'Power',
            }
            sdata[outletname] = outsense
        return sdata

    def set_outlet(self, outlet, state):
        rsp = self.do_request('cgi_pdu_outlets')
@ -247,7 +310,12 @@ class PDUClient(object):
            idx += 1

def retrieve(nodes, element, configmanager, inputdata):
    if 'outlets' not in element:
    if element[0] == 'sensors':
        for node in nodes:
            for res in get_sensor_data(element, node, configmanager):
                yield res
        return
    elif 'outlets' not in element:
        for node in nodes:
            yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
        return
confluent_server/confluent/plugins/info/layout.py (new file, 100 lines)
@ -0,0 +1,100 @@
# Copyright 2023 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import confluent.core as core
import confluent.messages as msg

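# Build a row -> rack -> U map of nodes and enclosures from the location
# attributes, pulling enclosure locations separately when only the bay
# members carry attributes, and report enclosures that lack a location.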
def retrieve(nodes, element, configmanager, inputdata):
    locationinfo = configmanager.get_node_attributes(nodes,
        (u'enclosure.manager', u'enclosure.bay', u'location.rack',
         u'location.row', u'location.u', u'location.height'))
    enclosuremap = {}
    rackmap = {}
    allnodedata = {}
    needenclosures = set([])
    locatednodes = set([])
    for node in locationinfo:
        nodeinfo = locationinfo[node]
        rack = nodeinfo.get(u'location.rack', {}).get('value', '')
        u = nodeinfo.get(u'location.u', {}).get('value', None)
        row = nodeinfo.get(u'location.row', {}).get('value', '')
        enclosure = nodeinfo.get(u'enclosure.manager', {}).get('value', None)
        bay = nodeinfo.get(u'enclosure.bay', {}).get('value', None)
        height = nodeinfo.get(u'location.height', {}).get('value', None)
        if enclosure:
            if enclosure not in enclosuremap:
                enclosuremap[enclosure] = {}
            enclosuremap[enclosure][bay] = node
            if u:
                if row not in rackmap:
                    rackmap[row] = {}
                if rack not in rackmap[row]:
                    rackmap[row][rack] = {}
                rackmap[row][rack][u] = {'node': enclosure, 'children': enclosuremap[enclosure]}
                allnodedata[enclosure] = rackmap[row][rack][u]
                if height:
                    allnodedata[enclosure]['height'] = height
            else:  # need to see if enclosure lands in the map naturally or need to pull it
                needenclosures.add(enclosure)
        elif u:
            if row not in rackmap:
                rackmap[row] = {}
            if rack not in rackmap[row]:
                rackmap[row][rack] = {}
            rackmap[row][rack][u] = {'node': node}
            allnodedata[node] = rackmap[row][rack][u]
            if height:
                allnodedata[node]['height'] = height
            locatednodes.add(node)
    cfgenc = needenclosures - locatednodes
    locationinfo = configmanager.get_node_attributes(cfgenc, (u'location.rack', u'location.row', u'location.u', u'location.height'))
    for enclosure in locationinfo:
        nodeinfo = locationinfo[enclosure]
        rack = nodeinfo.get(u'location.rack', {}).get('value', '')
        u = nodeinfo.get(u'location.u', {}).get('value', None)
        row = nodeinfo.get(u'location.row', {}).get('value', '')
        height = nodeinfo.get(u'location.height', {}).get('value', None)
        if u:
            allnodedata[enclosure] = {'node': enclosure, 'children': enclosuremap[enclosure]}
            if height:
                allnodedata[enclosure]['height'] = height
            if row not in rackmap:
                rackmap[row] = {}
            if rack not in rackmap[row]:
                rackmap[row][rack] = {}
            rackmap[row][rack][u] = allnodedata[enclosure]
    results = {
        'errors': [],
        'locations': rackmap,
    }
    for enclosure in enclosuremap:
        if enclosure not in allnodedata:
            results['errors'].append('Enclosure {} is missing required location information'.format(enclosure))
        else:
            allnodedata[enclosure]['children'] = enclosuremap[enclosure]
    needheight = set([])
    for node in allnodedata:
        if 'height' not in allnodedata[node]:
            needheight.add(node)
    needheight = ','.join(needheight)
    if needheight:
        for rsp in core.handle_path(
                '/noderange/{0}/description'.format(needheight),
                'retrieve', configmanager,
                inputdata=None):
            kvp = rsp.kvpairs
            for node in kvp:
                allnodedata[node]['height'] = kvp[node]['height']
    yield msg.Generic(results)
@ -173,9 +173,11 @@ if __name__ == '__main__':
    os.chdir(os.path.dirname(sys.argv[2]))
    if isinstance(plays, dict):
        plays = [plays]
    taskman = TaskQueueManager(inventory=invman, loader=loader, passwords={},
                               variable_manager=varman, stdout_callback=ResultsCollector())

    for currplay in plays:
        taskman = TaskQueueManager(inventory=invman, loader=loader, passwords={},
                                   variable_manager=varman, stdout_callback=ResultsCollector())

        currplay['hosts'] = sys.argv[1]
        if 'become' in currplay and 'become_user' not in currplay:
            del currplay['become']
@ -141,6 +141,8 @@ def sessionhdl(connection, authname, skipauth=False, cert=None):
        if 'collective' in response:
            return collective.handle_connection(connection, cert,
                                                response['collective'])
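        # Hold new sessions until the configuration store is ready (it is
        # marked not ready during a collective join or a restore).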
        while not configmanager.config_is_ready():
            eventlet.sleep(1)
        if 'dispatch' in response:
            dreq = tlvdata.recvall(connection, response['dispatch']['length'])
            return pluginapi.handle_dispatch(connection, cert, dreq,
@ -34,7 +34,7 @@ mkdir -p opt/confluent/lib/imgutil
mkdir -p opt/confluent/bin
mv imgutil opt/confluent/bin/
chmod a+x opt/confluent/bin/imgutil
mv ubuntu suse15 el7 el9 el8 opt/confluent/lib/imgutil/
mv ubuntu* suse15 el7 el9 el8 opt/confluent/lib/imgutil/
mkdir -p opt/confluent/share/licenses/confluent_imgutil
cp LICENSE opt/confluent/share/licenses/confluent_imgutil
imgutil/imgutil
@ -3,7 +3,13 @@ import configparser
import ctypes
import ctypes.util
import datetime
from distutils.dir_util import copy_tree
import inspect
from shutil import copytree as copytree
if hasattr(inspect, 'getfullargspec') and 'dirs_exist_ok' in inspect.getfullargspec(copytree).args:
    def copy_tree(src, dst):
        copytree(src, dst, dirs_exist_ok=True)
else:
    from distutils.dir_util import copy_tree
import glob
import json
import argparse
@ -17,7 +23,10 @@ import subprocess
import sys
import tempfile
import time
import yaml
try:
    import yaml
except ImportError:
    pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@ -139,13 +148,30 @@ def capture_fs(args):
    masker.mask('/etc/ssh/*key')
    masker.mask('/etc/pki/tls/private/*')
    masker.mask('/root/.ssh/id_*')
    masker.mask('/etc/netplan/*.yaml')
    subprocess.check_call(['mksquashfs', '/run/imgutil/capin', fname + '.sfs', '-comp', 'xz'])

def capture_local_cleanup():
    shutil.rmtree('/usr/lib/dracut/modules.d/97confluent')
    try:
        shutil.rmtree('/usr/lib/dracut/modules.d/97confluent')
    except Exception:
        pass
    subprocess.check_call(['umount', '/run/imgutil/capout'])
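
# Pick the boot tree builder by the initramfs tooling present on the captured
# system: dracut for EL distributions, initramfs-tools for Debian/Ubuntu.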
def build_boot_tree(targpath):
    if glob.glob('/usr/lib/dracut/modules.d/97confluent/install*'):
        return build_el_boot_tree(targpath)
    elif glob.glob('/etc/initramfs-tools/'):
        return build_deb_boot_tree(targpath)

def build_deb_boot_tree(targpath):
    kver = os.uname().release
    mkdirp(os.path.join(targpath, 'boot/initramfs/'))
    subprocess.check_call(['mkinitramfs', '-o', os.path.join(targpath, 'boot/initramfs/distribution')])
    shutil.copy2('/boot/vmlinuz-{}'.format(kver), os.path.join(targpath, 'boot/kernel'))
    gather_bootloader(targpath)

def build_el_boot_tree(targpath):
    for dscript in glob.glob('/usr/lib/dracut/modules.d/97confluent/install*'):
        os.chmod(dscript, 0o755)
    kver = os.uname().release
@ -168,19 +194,32 @@ def capture_remote(args):
    # with here locally,
    # another that is remotely called to gather target profile info
    # and a third that is exclusive to pack_image for diskless mode
    utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
    utillib = os.path.join(utillib, 'el8/dracut/')
    subprocess.check_call(['ssh', targ, 'mkdir', '-p', '/run/imgutil/capenv'])
    subprocess.check_call(['rsync', __file__, '{0}:/run/imgutil/capenv/'.format(targ)])
    finfo = subprocess.check_output(['ssh', targ, 'python3', '/run/imgutil/capenv/imgutil', 'getfingerprint']).decode('utf8')
    finfo = json.loads(finfo)
    if finfo['oscategory'] not in ('el8', 'el9'):
        raise Exception('Not yet supported for capture: ' + repr(finfo))
    if finfo['oscategory'] not in ('el8', 'el9', 'ubuntu20.04', 'ubuntu22.04'):
        sys.stderr.write('Not yet supported for capture: ' + repr(finfo) + '\n')
        sys.exit(1)
    unmet = finfo.get('unmetprereqs', [])
    if unmet:
        for cmd in unmet:
            sys.stderr.write(cmd + '\n')
        sys.exit(1)
    oscat = finfo['oscategory']
    subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'python3', '/run/imgutil/capenv/imgutil', 'capturelocal'])
    utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
    utillib = os.path.join(utillib, '{}/dracut/'.format(oscat))
    subprocess.check_call(['rsync', '-a', utillib, '{0}:/usr/lib/dracut/modules.d/97confluent'.format(targ)])
    utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
    if oscat.startswith('ubuntu'):
        utillib = os.path.join(utillib, '{}/initramfs-tools/'.format(oscat))
        if not os.path.exists(utillib):
            raise Exception('Not yet supported for capture: ' + repr(finfo))
        subprocess.check_call(['rsync', '-a', utillib, '{0}:/etc/initramfs-tools'.format(targ)])
        subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'chmod', '+x', '/etc/initramfs-tools/hooks/confluent'])
    else:
        utillib = os.path.join(utillib, '{}/dracut/'.format(oscat))
        if not os.path.exists(utillib):
            raise Exception('Not yet supported for capture: ' + repr(finfo))
        subprocess.check_call(['rsync', '-a', utillib, '{0}:/usr/lib/dracut/modules.d/97confluent'.format(targ)])
    sys.stdout.write('Generating deployment initramfs...')
    sys.stdout.flush()
    subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'python3', '/run/imgutil/capenv/imgutil', 'capturelocalboot'])
@ -407,10 +446,12 @@ def get_mydir(oscategory):
class OsHandler(object):
    def __init__(self, name, version, arch, args):
        self.name = name
        self._interactive = True
        self.version = version
        self.arch = arch
        self.sourcepath = None
        self.osname = '{}-{}-{}'.format(name, version, arch)
        self.captureprereqs = []
        try:
            pkglist = args.packagelist
        except AttributeError:
@ -434,13 +475,16 @@ class OsHandler(object):
        except AttributeError:
            self.addrepos = []

    def set_interactive(self, shouldbeinteractive):
        self._interactive = shouldbeinteractive

    def get_json(self):
        odata = [self.oscategory, self.version, self.arch, self.name]
        for idx in range(len(odata)):
            if not isinstance(odata[idx], str):
                odata[idx] = odata[idx].decode('utf8')
        info = {'oscategory': odata[0],
                'version': odata[1], 'arch': odata[2], 'name': odata[3]}
                'version': odata[1], 'arch': odata[2], 'name': odata[3], 'unmetprereqs': self.captureprereqs}
        return json.dumps(info)

    def prep_root_premount(self, args):
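Note: with unmetprereqs folded into get_json, a getfingerprint reply might look like this (values invented for illustration):

    {"oscategory": "el9", "version": "9.2", "arch": "x86_64",
     "name": "rhel", "unmetprereqs": []}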
@ -547,7 +591,10 @@ class SuseHandler(OsHandler):
        cmd = ['chmod', 'a+x']
        cmd.extend(glob.glob(os.path.join(targdir, '*')))
        subprocess.check_call(cmd)
        subprocess.check_call(['zypper', '-R', self.targpath, 'install'] + self.zyppargs)
        if self._interactive:
            subprocess.check_call(['zypper', '-R', self.targpath, 'install'] + self.zyppargs)
        else:
            subprocess.check_call(['zypper', '-n', '-R', self.targpath, 'install'] + self.zyppargs)
        os.symlink('/usr/lib/systemd/system/sshd.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/sshd.service'))
        if os.path.exists(os.path.join(self.targpath, 'sbin/mkinitrd')):
            args.cmd = ['mkinitrd']
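Note: the prompting zypper call stays the default; `-n` is zypper's non-interactive mode, which auto-answers its prompts, e.g. `zypper -n -R /tmp/imgscratch install bash` (target root illustrative).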
@ -557,12 +604,21 @@ class SuseHandler(OsHandler):


class DebHandler(OsHandler):
    def __init__(self, name, version, arch, args, codename):
    def __init__(self, name, version, arch, args, codename, hostpath):
        self.includepkgs = []
        self.targpath = None
        self.codename = codename
        self.oscategory = name + version
        super().__init__(name, version, arch, args)
        needpkgs = []
        if not os.path.exists(os.path.join(hostpath, 'usr/bin/tpm2_getcap')):
            needpkgs.append('tpm2-tools')
        lfuses = glob.glob(os.path.join(hostpath, '/lib/*/libfuse.so.2'))
        if not lfuses:
            needpkgs.append('libfuse2')
        if needpkgs:
            needapt = 'Missing packages needed in target for capture, to add required packages: apt install ' + ' '.join(needpkgs)
            self.captureprereqs.append(needapt)

    def add_pkglists(self):
        self.includepkgs.extend(self.list_packages())
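Note: if, say, libfuse2 were absent on a Debian-family target, unmetprereqs would carry a line like

    Missing packages needed in target for capture, to add required packages: apt install libfuse2

which capture_remote (above) prints before exiting 1.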
@ -590,11 +646,27 @@ class DebHandler(OsHandler):


class ElHandler(OsHandler):
    def __init__(self, name, version, arch, args):
    def __init__(self, name, version, arch, args, hostpath='/'):
        self.oscategory = 'el{0}'.format(version.split('.')[0])
        self.yumargs = []
        super().__init__(name, version, arch, args)

        needpkgs = []
        if not hostpath:
            return
        if not os.path.exists(os.path.join(hostpath, 'usr/bin/tpm2_getcap')):
            needpkgs.append('tpm2-tools')
        lfuses = glob.glob(os.path.join(hostpath, '/usr/lib64/libfuse.so.2'))
        if not lfuses:
            needpkgs.append('fuse-libs')
        if not os.path.exists(os.path.join(hostpath, '/usr/bin/ipcalc')):
            needpkgs.append('ipcalc')
        if not os.path.exists(os.path.join(hostpath, 'usr/sbin/dhclient')):
            needpkgs.append('dhcp-client')
        if not os.path.exists(os.path.join(hostpath, 'usr/sbin/mount.nfs')):
            needpkgs.append('nfs-utils')
        if needpkgs:
            needapt = 'Missing packages needed in target for capture, to add required packages: dnf install ' + ' '.join(needpkgs)
            self.captureprereqs.append(needapt)

    def add_pkglists(self):
        self.yumargs.extend(self.list_packages())
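Note: the early return when hostpath is falsy lets callers fingerprinting installation media (fingerprint_source_el below passes None) skip the prerequisite probe, since those binaries only matter on a live capture target. A reviewer may also note that os.path.join discards hostpath for the components written with a leading slash ('/usr/lib64/libfuse.so.2', '/usr/bin/ipcalc'), so those two probes always inspect the running root regardless of hostpath:

    os.path.join('/mnt/target', 'usr/bin/ipcalc')   # -> '/mnt/target/usr/bin/ipcalc'
    os.path.join('/mnt/target', '/usr/bin/ipcalc')  # -> '/usr/bin/ipcalc'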
@ -627,7 +699,10 @@ class ElHandler(OsHandler):
        cmd = ['chmod', 'a+x']
        cmd.extend(glob.glob(os.path.join(targdir, '*')))
        subprocess.check_call(cmd)
        subprocess.check_call(['yum'] + self.yumargs)
        if self._interactive:
            subprocess.check_call(['yum'] + self.yumargs)
        else:
            subprocess.check_call(['yum', '-y'] + self.yumargs)
        with open('/proc/mounts') as mountinfo:
            for line in mountinfo.readlines():
                if line.startswith('selinuxfs '):
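Note: this mirrors the SuseHandler hunk: `yum -y` (assume-yes) suppresses the transaction confirmation only when the caller disabled prompting via set_interactive(False), e.g. `yum -y --installroot=/tmp/imgscratch install bash` (flags illustrative; the real root comes from self.yumargs).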
@ -764,6 +839,7 @@ def main():
    buildp.add_argument('-a', '--addpackagelist', action='append', default=[],
                        help='A list of additional packages to include, may be specified multiple times')
    buildp.add_argument('-s', '--source', help='Directory to pull installation from, typically a subdirectory of /var/lib/confluent/distributions. By default, the repositories for the build system are used.')
    buildp.add_argument('-y', '--non-interactive', help='Avoid prompting for confirmation', action='store_true')
    buildp.add_argument('-v', '--volume',
                        help='Directory to make available in the build environment. -v / will '
                             'cause it to be mounted in image as /run/external/, -v /:/run/root '
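Note: together with the build_root wiring below, the new flag enables unattended builds; a hedged invocation (source directory and scratch path invented, argument layout per the options above):

    imgutil build -y -s /var/lib/confluent/distributions/rhel-9.2-x86_64 /tmp/imgscratch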
@ -966,7 +1042,7 @@ def fingerprint_source_el(files, sourcepath, args):
        if arch == 'noarch':
            prodinfo = open(os.path.join(sourcepath, '.discinfo')).read()
            arch = prodinfo.split('\n')[2]
        return ElHandler(osname, ver, arch, args)
        return ElHandler(osname, ver, arch, args, None)
    return None
@ -1011,14 +1087,14 @@ def fingerprint_host_el(args, hostpath='/'):
                release = v
            elif k == 'Version':
                version = v
    except subprocess.SubprocessError:
    except (subprocess.SubprocessError, FileNotFoundError):
        return None
    if 'el8' not in release and 'el7' not in release and 'el9' not in release:
        return None
    osname = osname.replace('-release', '').replace('-', '_')
    if osname == 'centos_linux':
        osname = 'centos'
    return ElHandler(osname, version, os.uname().machine, args)
    return ElHandler(osname, version, os.uname().machine, args, hostpath)


def fingerprint_host_deb(args, hostpath='/'):
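Note: the widened except matters on hosts with no rpm at all: subprocess raises FileNotFoundError (not SubprocessError) when the binary itself is missing, and returning None lets detection fall through to the Debian and SUSE fingerprints. A quick demonstration of the distinction:

    import subprocess
    try:
        subprocess.check_output(['rpm', '--version'])
    except FileNotFoundError:
        print('no rpm binary here')          # Debian/SUSE host
    except subprocess.SubprocessError:
        print('rpm present but query failed')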
@ -1042,7 +1118,7 @@ def fingerprint_host_deb(args, hostpath='/'):
    except IOError:
        pass
    if osname:
        return DebHandler(osname, vers, os.uname().machine, args, codename)
        return DebHandler(osname, vers, os.uname().machine, args, codename, hostpath)


def fingerprint_host_suse(args, hostpath='/'):
@ -1098,6 +1174,8 @@ def build_root(args):
        sys.stderr.write(
            'Unable to recognize build system os\n')
        sys.exit(1)
    if args.non_interactive:
        oshandler.set_interactive(False)
    oshandler.set_target(args.scratchdir)
    oshandler.add_pkglists()
    for dirname in ('proc', 'sys', 'dev', 'run'):
@ -1348,6 +1426,10 @@ def gather_bootloader(outdir, rootpath='/'):
        grubs = glob.glob(grubs)
        if len(grubs) == 1:
            grubbin = grubs[0]
    if 'ubuntu' in grubbin:  # we need to store a hint that this grub has a different hard-coded prefix
        mkdirp(os.path.join(outdir, 'boot/EFI/ubuntu/'))
        with open(os.path.join(outdir, 'boot/EFI/ubuntu/grub.cfg'), 'w') as wo:
            wo.write('')
    shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grubx64.efi'))
    shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grub.efi'))
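Note: the empty boot/EFI/ubuntu/grub.cfg is written purely as a marker, not a config: Ubuntu builds its grub EFI binary with an EFI/ubuntu prefix baked in, so downstream boot assembly can test for the file and place the real grub.cfg where that prefix will look. The consumer of the hint is outside this diff; a hypothetical sketch:

    # Hypothetical downstream check (names invented):
    if os.path.exists(os.path.join(bootdir, 'EFI/ubuntu/grub.cfg')):
        cfgdest = 'EFI/ubuntu/grub.cfg'   # ubuntu-prefixed grub binary
    else:
        cfgdest = 'efi/boot/grub.cfg'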
imgutil/ubuntu20.04 (symbolic link)
@ -0,0 +1 @@
ubuntu

imgutil/ubuntu22.04 (symbolic link)
@ -0,0 +1 @@
ubuntu
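Note: the two one-line files above are symlinks: ubuntu20.04 and ubuntu22.04 both resolve to a single shared imgutil/ubuntu payload, so capture_remote's '{}/initramfs-tools/'.format(oscat) lookup finds the same hooks for either release. Equivalent to:

    ln -s ubuntu imgutil/ubuntu20.04
    ln -s ubuntu imgutil/ubuntu22.04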