mirror of https://github.com/xcat2/confluent.git

Merge branch 'master' into ip6

Jarrod Johnson 2021-09-07 11:10:25 -04:00
commit 10e408559b
10 changed files with 394 additions and 84 deletions


@@ -101,7 +101,7 @@ done
cd /
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
hostname $nodename
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | head -n 1 | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
if [ -z "$confluent_mgr" ]; then
confluent_mgr=$(grep ^MANAGER: /etc/confluent/confluent.info|head -n 1 | awk '{print $2}')
fi
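The change above takes only the first EXTMGRINFO entry whose reachability flag is 1 (via head -n 1) before falling back to the first MANAGER line. A minimal Python sketch of that selection, assuming a confluent.info layout of "EXTMGRINFO: <mgr>|<iface>|<flag>" (the field meaning is an assumption here, not taken from the source):

    # Sketch only; the deployment script does this with grep/awk.
    def pick_manager(info_text):
        lines = info_text.splitlines()
        for line in lines:
            if line.startswith('EXTMGRINFO:') and line.endswith('1'):
                return line.split(': ', 1)[1].split('|')[0]   # first reachable external manager
        for line in lines:
            if line.startswith('MANAGER:'):
                return line.split()[1]                        # fallback: first MANAGER entry
        return None

    sample = 'MANAGER: 10.1.0.1\nEXTMGRINFO: 10.1.0.1|eno1|1\nEXTMGRINFO: 10.1.0.2|eno2|1\n'
    print(pick_manager(sample))  # -> 10.1.0.1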


@@ -134,7 +134,9 @@ class CredMerger:
else:
self.discardnames[name] = 1
else:
if uid >= minid and uid <= maxid:
if name[0] in ('+', '#', '@'):
self.sourcedata.append(line)
elif uid >= minid and uid <= maxid:
self.sourcedata.append(line)
def read_shadow(self, source):
@@ -167,6 +169,8 @@ class CredMerger:
shadout.write(line + '\n')
for line in self.sourcedata:
name, _ = line.split(':', 1)
if name[0] in ('+', '#', '@'):
continue
if name in self.shadowednames:
continue
shadout.write(name + ':!:::::::\n')
@@ -216,4 +220,4 @@ def synchronize():
if __name__ == '__main__':
synchronize()
synchronize()
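The new branches above copy NIS/compat entries (names starting with '+', '#' or '@') into the merged passwd data unchanged, and skip them when writing locked shadow stubs. A condensed sketch of that filtering, assuming plain colon-separated passwd lines (the real CredMerger methods are structured differently):

    # Illustration only.
    def merge_passwd(lines, minid=1000, maxid=60000):
        merged = []
        for line in lines:
            name, _, uid = line.split(':')[:3]
            if name[0] in ('+', '#', '@'):
                merged.append(line)               # compat/NIS entry: pass through verbatim
            elif minid <= int(uid) <= maxid:
                merged.append(line)               # ordinary user in the managed id range
        return merged

    def shadow_stubs(passwd_lines, shadowednames=()):
        stubs = []
        for line in passwd_lines:
            name = line.split(':', 1)[0]
            if name[0] in ('+', '#', '@') or name in shadowednames:
                continue                          # no locked shadow stub for compat entries
            stubs.append(name + ':!:::::::')
        return stubs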


@@ -102,7 +102,7 @@ done
cd /
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
hostname $nodename
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | head -n 1|awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
if [ -z "$confluent_mgr" ]; then
confluent_mgr=$(grep ^MANAGER: /etc/confluent/confluent.info|head -n 1 | awk '{print $2}')
fi


@@ -7,7 +7,29 @@ else
confluent_urls="$confluent_urls https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs"
/opt/confluent/bin/urlmount $confluent_urls /mnt/remoteimg
fi
mount -o loop,ro /mnt/remoteimg/*.sfs /mnt/remote
/opt/confluent/bin/confluent_imginfo /mnt/remoteimg/rootimg.sfs > /tmp/rootimg.info
loopdev=$(losetup -f)
export mountsrc=$loopdev
losetup -r $loopdev /mnt/remoteimg/rootimg.sfs
if grep '^Format: confluent_crypted' /tmp/rootimg.info > /dev/null; then
curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/profileprivate/pending/rootimg.key > /tmp/rootimg.key
cipher=$(head -n 1 /tmp/rootimg.key)
key=$(tail -n 1 /tmp/rootimg.key)
len=$(wc -c /mnt/remoteimg/rootimg.sfs | awk '{print $1}')
len=$(((len-4096)/512))
dmsetup create cryptimg --table "0 $len crypt $cipher $key 0 $loopdev 8"
/opt/confluent/bin/confluent_imginfo /dev/mapper/cryptimg > /tmp/rootimg.info
mountsrc=/dev/mapper/cryptimg
fi
if grep '^Format: squashfs' /tmp/rootimg.info > /dev/null; then
mount -o ro $mountsrc /mnt/remote
elif grep '^Format: confluent_multisquash' /tmp/rootimg.info; then
tail -n +3 /tmp/rootimg.info | awk '{gsub("/", "_"); print "echo 0 " $4 " linear '$mountsrc' " $3 " | dmsetup create mproot" $7}' > /tmp/setupmount.sh
. /tmp/setupmount.sh
cat /tmp/setupmount.sh |awk '{printf "mount /dev/mapper/"$NF" "; sub("mproot", ""); gsub("_", "/"); print "/mnt/remote"$NF}' > /tmp/mountparts.sh
. /tmp/mountparts.sh
fi
#mount -t tmpfs overlay /mnt/overlay
modprobe zram
memtot=$(grep ^MemTotal: /proc/meminfo|awk '{print $2}')
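In the confluent_multisquash branch above, awk turns each partition line of /tmp/rootimg.info into a dmsetup linear target plus a matching mount of the resulting /dev/mapper device under /mnt/remote. A rough Python rendering of that generation; the meaning of the columns ($3 start sector, $4 sector count, $7 partition name) is inferred from the awk program, not from the confluent_imginfo output format itself:

    # Sketch; the real field layout of rootimg.info is an assumption.
    def gen_multisquash_cmds(info_lines, mountsrc='/dev/loop0'):
        setup, mounts = [], []
        for fields in (line.split() for line in info_lines):
            start, length = fields[2], fields[3]
            name = fields[6].replace('/', '_')              # e.g. /boot -> _boot
            setup.append('echo 0 {} linear {} {} | dmsetup create mproot{}'.format(
                length, mountsrc, start, name))
            mounts.append('mount /dev/mapper/mproot{} /mnt/remote{}'.format(
                name, name.replace('_', '/')))
        return setup, mounts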
@@ -15,8 +37,15 @@ memtot=$((memtot/2))$(grep ^MemTotal: /proc/meminfo | awk '{print $3'})
echo $memtot > /sys/block/zram0/disksize
mkfs.xfs /dev/zram0 > /dev/null
mount -o discard /dev/zram0 /mnt/overlay
mkdir -p /mnt/overlay/upper /mnt/overlay/work
mount -t overlay -o upperdir=/mnt/overlay/upper,workdir=/mnt/overlay/work,lowerdir=/mnt/remote disklessroot /sysroot
if [ ! -f /tmp/mountparts.sh ]; then
mkdir -p /mnt/overlay/upper /mnt/overlay/work
mount -t overlay -o upperdir=/mnt/overlay/upper,workdir=/mnt/overlay/work,lowerdir=/mnt/remote disklessroot /sysroot
else
for srcmount in $(cat /tmp/mountparts.sh | awk '{print $3}'); do
mkdir -p /mnt/overlay${srcmount}/upper /mnt/overlay${srcmount}/work
mount -t overlay -o upperdir=/mnt/overlay${srcmount}/upper,workdir=/mnt/overlay${srcmount}/work,lowerdir=${srcmount} disklesspart /sysroot${srcmount#/mnt/remote}
done
fi
mkdir -p /sysroot/etc/ssh
mkdir -p /sysroot/etc/confluent
mkdir -p /sysroot/root/.ssh
@@ -77,6 +106,7 @@ curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/o
mkdir -p /sysroot/opt/confluent/bin
curl -sf https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/onboot.sh > /sysroot/opt/confluent/bin/onboot.sh
chmod +x /sysroot/opt/confluent/bin/onboot.sh
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
cp /etc/confluent/functions /sysroot/etc/confluent/functions
@@ -101,4 +131,9 @@ dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
dnsdomain=${dnsdomain#dnsdomain: }
sed -i 's/^NETCONFIG_DNS_STATIC_SEARCHLIST="/NETCONFIG_DNS_STATIC_SEARCHLIST="'$dnsdomain/ /sysroot/etc/sysconfig/network/config
cp /run/confluent/ifroute-* /run/confluent/ifcfg-* /sysroot/etc/sysconfig/network
if grep installtodisk /proc/cmdline > /dev/null; then
. /etc/confluent/functions
run_remote installimage
exec reboot -f
fi
exec /opt/confluent/bin/start_root


@@ -340,6 +340,10 @@ class NodeHandler(immhandler.NodeHandler):
rsp, status = wc.grab_json_response_with_status(
'/api/function',
{'USER_UserModify': '{0},{1},,1,Administrator,0,0,0,0,,8,'.format(uid, username)})
elif status == 200 and rsp.get('return', 0) == 13:
rsp, status = wc.grab_json_response_with_status(
'/api/function',
{'USER_UserModify': '{0},{1},,1,4,0,0,0,0,,8,,,'.format(uid, username)})
self.tmppasswd = None
self._currcreds = (username, passwd)


@@ -15,8 +15,6 @@
# limitations under the License.
# A consolidated manage of neighbor table information management.
# Ultimately, this should use AF_NETLINK, but in the interest of time,
# use ip neigh for the moment
import confluent.util as util
import os
@@ -44,34 +42,37 @@ def _update_neigh():
ndmsg= b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
s.sendall(nlhdr + ndmsg)
neightable = {}
while True:
pdata = s.recv(65536)
v = memoryview(pdata)
if struct.unpack('H', v[4:6])[0] == 3: # netlink done message
break
while len(v):
length, typ = struct.unpack('IH', v[:6])
if typ == 28:
hlen = struct.calcsize('BIHBB')
_, idx, state, flags, typ = struct.unpack('BIHBB', v[16:16+hlen])
if typ == 1: # only handle unicast entries
curraddr = None
currip = None
rta = v[16+hlen:length]
while len(rta):
rtalen, rtatyp = struct.unpack('HH', rta[:4])
if rtatyp == 2: # hwaddr
curraddr = rta[4:rtalen].tobytes()
if len(curraddr) == 20:
curraddr = curraddr[12:]
elif rtatyp == 1: # ip address
currip = rta[4:rtalen].tobytes()
rta = rta[rtalen:]
if not rtalen:
break
if curraddr and currip:
neightable[currip] = curraddr
v = v[length:]
try:
while True:
pdata = s.recv(65536)
v = memoryview(pdata)
if struct.unpack('H', v[4:6])[0] == 3:
break
while len(v):
length, typ = struct.unpack('IH', v[:6])
if typ == 28:
hlen = struct.calcsize('BIHBB')
_, idx, state, flags, typ = struct.unpack('BIHBB', v[16:16+hlen])
if typ == 1: # only handle unicast entries
curraddr = None
currip = None
rta = v[16+hlen:length]
while len(rta):
rtalen, rtatyp = struct.unpack('HH', rta[:4])
if rtatyp == 2: # hwaddr
curraddr = rta[4:rtalen].tobytes()
if len(curraddr) == 20:
curraddr = curraddr[12:]
elif rtatyp == 1: # ip address
currip = rta[4:rtalen].tobytes()
rta = rta[rtalen:]
if not rtalen:
break
if curraddr and currip:
neightable[currip] = curraddr
v = v[length:]
finally:
s.close()
def get_hwaddr(ipaddr):

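The refactor above wraps the netlink receive loop in try/finally so the socket is always closed even if parsing fails. The table it fills maps packed binary IP addresses to raw hardware addresses; a small sketch, assuming that key format, of how a lookup in the style of get_hwaddr might consume it:

    import socket

    def lookup_hwaddr(neightable, ipaddr):
        # Keys are the raw address bytes taken from the netlink attributes (assumed).
        family = socket.AF_INET6 if ':' in ipaddr else socket.AF_INET
        mac = neightable.get(socket.inet_pton(family, ipaddr))
        if mac is None:
            return None
        return ':'.join('{:02x}'.format(b) for b in bytearray(mac))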

@@ -363,7 +363,7 @@ def create_yumconf(sourcedir, addrepos):
yumconf.write('[addrepo-{0}]\n'.format(addrepoidx))
yumconf.write('name=Add-on repository {0}\n'.format(addrepoidx))
yumconf.write('baseurl={0}\n'.format(repo))
yumconf.write('enabled=1\ngpgcheck\0\n\n')
yumconf.write('enabled=1\ngpgcheck=0\n\n')
addrepoidx += 1
return repodir
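The one-character fix above replaces a stray 'gpgcheck\0' with 'gpgcheck=0', so each add-on stanza is valid yum configuration. A sketch of the stanza the loop emits for a hypothetical added repository:

    # Hypothetical repo URL; mirrors the writes in create_yumconf.
    addrepoidx, repo = 1, 'http://mirror.example.com/extras'
    stanza = ('[addrepo-{0}]\n'
              'name=Add-on repository {0}\n'
              'baseurl={1}\n'
              'enabled=1\ngpgcheck=0\n\n').format(addrepoidx, repo)
    print(stanza)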
@@ -387,6 +387,8 @@ class OsHandler(object):
except AttributeError:
pkglist = ''
if pkglist:
if os.path.exists(os.path.abspath(pkglist)):
pkglist = os.path.abspath(pkglist)
self.pkglist = pkglist
if '/' not in self.pkglist:
self.pkglist = os.path.join(get_mydir(self.oscategory), self.pkglist)
@@ -414,8 +416,10 @@ class OsHandler(object):
class SuseHandler(OsHandler):
def __init__(self, name, version, arch, args):
if not version.startswith(b'15.'):
raise Exception('Unsupported Suse version {}'.format(version.decode('utf8')))
if not isinstance(version, str):
version = version.decode('utf8')
if not version.startswith('15.'):
raise Exception('Unsupported Suse version {}'.format(version))
self.oscategory = 'suse15'
super().__init__(name, version, arch, args)
self.zyppargs = []
@@ -437,17 +441,34 @@ class SuseHandler(OsHandler):
self.sources.append('file://' + os.path.join(sourcepath, 'Product-HPC'))
def prep_root(self, args):
gpgkeys = []
mkdirp(self.targpath)
if not self.sources:
gpgkeys = glob.glob('/usr/lib/rpm/gnupg/keys/*.asc')
targzypp = os.path.join(self.targpath, 'etc/zypp')
mkdirp(targzypp)
shutil.copytree(
'/etc/zypp/repos.d/', os.path.join(targzypp, 'repos.d'))
idx = 1
for source in self.sources:
subprocess.check_call(['zypper', '-R', self.targpath, 'ar', source])
if not source:
continue
if source.startswith('file://'):
gpgpath = source.replace('file://', '')
gpgkeys.extend(glob.glob(os.path.join(gpgpath, '*/gpg-pubkey*.asc')))
subprocess.check_call(['zypper', '-R', self.targpath, 'ar', source, 'source-{}'.format(idx)])
idx += 1
if gpgkeys:
addkeycmd = ['rpm', '--root', self.targpath, '--import'] + gpgkeys
subprocess.check_call(addkeycmd)
for source in self.addrepos.split(','):
if not source:
continue
if not source.startswith('/') and os.path.exists(os.path.abspath(source)):
source = os.path.abspath(source)
source = 'file://' + source
subprocess.check_call(['zypper', '-R', self.targpath, 'ar', source])
subprocess.check_call(['zypper', '-R', self.targpath, 'ar', source, 'source-{}'.format(idx)])
idx += 1
mydir = get_mydir(self.oscategory)
mkdirp(os.path.join(self.targpath, 'usr/lib/dracut/modules.d'))
mkdirp(os.path.join(self.targpath, 'etc/dracut.conf.d'))
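prep_root now skips empty sources, gives every zypper repository an explicit alias (source-1, source-2, ...), gathers GPG keys from file:// sources for rpm --import, and turns bare local paths passed via addrepos into file:// URLs. A minimal sketch of the repo normalization, under the assumption that only local paths are rewritten to file:// URLs and that addrepos is the comma-separated string used above:

    import os

    def normalize_addrepos(addrepos, start_idx=1):
        # Returns (url, alias) pairs in the style zypper -R <root> ar <url> <alias> expects.
        normalized, idx = [], start_idx
        for source in addrepos.split(','):
            if not source:
                continue
            if not source.startswith('/') and os.path.exists(os.path.abspath(source)):
                source = os.path.abspath(source)
            if source.startswith('/'):
                source = 'file://' + source      # assumption: remote URLs are left as-is
            normalized.append((source, 'source-{}'.format(idx)))
            idx += 1
        return normalized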
@@ -477,7 +498,7 @@ class ElHandler(OsHandler):
yumconfig = create_yumconf(sourcepath, self.addrepos)
self.yumargs.extend(
['--setopt=reposdir={0}'.format(yumconfig), '--disablerepo=*',
'--enablerepo=genimage-*'])
'--enablerepo=genimage-*', '--enablerepo=addrepo-*'])
self.sourcepath = sourcepath
def set_target(self, targpath):
@@ -600,7 +621,7 @@ def main():
buildp.add_argument('-v', '--volume',
help='Directory to make available in the build environment. -v / will '
'cause it to be mounted in image as /run/external/, -v /:/run/root '
'will override the target to be /run/root', action='append')
'will override the target to be /run/root, and something like /var/lib/repository:- will cause it to mount to the identical path inside the image', action='append')
buildp.add_argument('scratchdir', help='Directory to build new diskless root in')
execp = sps.add_parser('exec', help='Start specified scratch directory as container')
execp.add_argument('-v', '--volume',
@@ -608,12 +629,14 @@ def main():
'cause it to be mounted in image as /run/external/, -v /:/run/root '
'will override the target to be /run/root', action='append')
execp.add_argument('scratchdir', help='Directory of an unpacked diskless root')
execp.add_argument('cmd', nargs='*', help='Optional command to run (defaults to a shell)')
unpackp = sps.add_parser('unpack', help='Unpack a diskless image to a scratch directory')
unpackp.add_argument('profilename', help='The diskless OS profile to unpack')
unpackp.add_argument('scratchdir', help='Directory to extract diskless root to')
packp = sps.add_parser('pack', help='Pack a scratch directory to a diskless profile')
packp.add_argument('scratchdir', help='Directory containing diskless root')
packp.add_argument('profilename', help='The desired diskless OS profile name to pack the root into')
packp.add_argument('-b', '--baseprofile', help='Profile to copy extra info from, for example to make a new version of an existing profile, reference the previous one as baseprofile', default=None)
capturep = sps.add_parser('capture', help='Capture an image for cloning from a running system')
capturep.add_argument('node', help='Node to capture image from')
capturep.add_argument('profilename', help='Profile name for captured image')
@@ -660,7 +683,13 @@ def exec_root_backend(args):
os.chroot(installroot)
os.chdir('/')
os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \W]$ '.format(imgname)
os.execv('/bin/bash', ['/bin/bash', '--login', '--noprofile'])
os.environ['CONFLUENT_IMGUTIL_MODE'] = 'exec'
if args.cmd:
if not args.cmd[0].startswith('/'):
args.cmd[0] = shutil.which(args.cmd[0])
os.execv(args.cmd[0], args.cmd)
else:
os.execv('/bin/bash', ['/bin/bash', '--login', '--noprofile'])
def _mount(src, dst, fstype=0, flags=0, options=0, mode=None):
@@ -682,6 +711,11 @@ def build_root_backend(optargs):
installroot = args.scratchdir
_mount_constrained_fs(args, installroot)
oshandler.prep_root(optargs)
mkdirp(os.path.join(installroot, 'etc/'))
with open(os.path.join(installroot, 'etc/confluentimg.buildinfo'), 'w') as imginfo:
imginfo.write('BUILDDATE={}\n'.format(datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')))
if args.source:
imginfo.write('BUILDSRC={}\n'.format(args.source))
def _mount_constrained_fs(args, installroot):
@@ -694,6 +728,8 @@ def _mount_constrained_fs(args, installroot):
for v in args.volume:
if ':' in v:
src, dst = v.split(':')
if dst == '-':
dst = src
while dst and dst[0] == '/':
dst = dst[1:]
dst = os.path.join(installroot, dst)
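With the new ':-' form, a volume such as /var/lib/repository:- is mounted at the identical path inside the image root. A short sketch of how a --volume spec resolves to (source, target-under-installroot) with that rule; the bare-form target under /run/external/ is an approximation of the documented behavior:

    import os

    def resolve_volume(spec, installroot):
        if ':' in spec:
            src, dst = spec.split(':')
            if dst == '-':
                dst = src                                    # '-' means same path inside the image
        else:
            src = spec
            dst = '/run/external/' + spec.lstrip('/')        # approximation of the bare form
        return src, os.path.join(installroot, dst.lstrip('/'))

    print(resolve_volume('/var/lib/repository:-', '/scratch'))
    # -> ('/var/lib/repository', '/scratch/var/lib/repository')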
@@ -787,20 +823,27 @@ def fingerprint_host_el(args, hostpath='/'):
def fingerprint_host_suse(args, hostpath='/'):
try:
import rpm
except ImportError:
return None
ts = rpm.TransactionSet(hostpath)
rpms = ts.dbMatch('provides', 'distribution-release')
vers = None
osname = None
for inf in rpms:
if b'openSUSE' in inf.name and b'Leap' in inf.summary:
osname = 'opensuse_leap'
if inf.name.startswith(b'SLE_'):
osname = 'sle'
try:
with open(os.path.join(hostpath, 'etc/os-release')) as relfile:
relinfo = relfile.read().split('\n')
for inf in relinfo:
if '=' in inf:
key, val = inf.split('=', 1)
if key == 'ID':
if val.lower().replace('"', '') == 'opensuse-leap':
osname = 'opensuse_leap'
elif val.lower().replace(
'"', '') in ('sle_hpc', 'sles'):
osname = 'sle'
elif key == 'VERSION_ID':
vers = val.replace('"', '')
except IOError:
pass
if osname:
return SuseHandler(osname, inf.version, os.uname().machine, args)
return SuseHandler(osname, vers, os.uname().machine, args)
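Rather than matching RPM package names, the SUSE fingerprinter now reads /etc/os-release and classifies the system from ID and VERSION_ID. A sketch of that parse on a made-up os-release snippet:

    def parse_os_release(text):
        # Returns (osname, version) the way fingerprint_host_suse classifies them.
        osname = vers = None
        for line in text.split('\n'):
            if '=' not in line:
                continue
            key, val = line.split('=', 1)
            val = val.replace('"', '')
            if key == 'ID':
                if val.lower() == 'opensuse-leap':
                    osname = 'opensuse_leap'
                elif val.lower() in ('sle_hpc', 'sles'):
                    osname = 'sle'
            elif key == 'VERSION_ID':
                vers = val
        return osname, vers

    sample = 'NAME="SLES"\nID="sles"\nVERSION_ID="15.3"\n'   # made-up snippet
    print(parse_os_release(sample))  # -> ('sle', '15.3')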
def fingerprint_host(args, hostpath='/'):
oshandler = None
@@ -811,6 +854,8 @@ def fingerprint_host(args, hostpath='/'):
return oshandler
def build_root(args):
if args.scratchdir:
args.scratchdir = os.path.abspath(args.scratchdir)
check_root(args.scratchdir)
yumargs = ['yum', '--installroot={0}'.format(args.scratchdir)]
if args.source:
@@ -822,9 +867,13 @@ def build_root(args):
else:
oshandler = fingerprint_host(args)
if oshandler is None:
sys.stderr.write(
'Unable to recognize source directory {0}\n'.format(
args.source))
if args.source:
sys.stderr.write(
'Unable to recognize source directory {0}\n'.format(
args.source))
else:
sys.stderr.write(
'Unable to recognize build system os\n')
sys.exit(1)
oshandler.set_target(args.scratchdir)
oshandler.add_pkglists()
@@ -841,7 +890,7 @@ def prep_decrypt(indir):
currtabs = currtabs.decode('utf8').split('\n')
usednames = set([])
for tab in currtabs:
if not tab:
if ':' not in tab:
continue
tabname, _ = tab.split(':', 1)
usednames.add(tabname)
@@ -890,9 +939,14 @@ def unpack_image(args):
if hdr[:4] in (b'sqsh', b'hsqs'):
break
raise Exception('Unrecognized image format')
mkdirp(scratchdir)
os.chdir(scratchdir)
subprocess.check_call(['unsquashfs', '-d', 'rootfs', indir])
while scratchdir.endswith('/'):
scratchdir = scratchdir[:-1]
scratchdir = os.path.abspath(scratchdir)
parentdir = os.path.dirname(scratchdir)
targdir = os.path.basename(scratchdir)
mkdirp(parentdir)
os.chdir(parentdir)
subprocess.check_call(['unsquashfs', '-d', targdir, indir])
finally:
if cleandmtable:
mounted = True
@@ -907,12 +961,41 @@ def unpack_image(args):
time.sleep(0.1)
def recursecp(source, targ):
if os.path.islink(source):
if os.path.exists(targ):
return
linktarg = os.readlink(source)
os.symlink(linktarg, targ)
if os.path.isdir(source):
if not os.path.exists(targ):
os.mkdir(targ)
for entry in os.listdir(source):
srcentry = os.path.join(source, entry)
targentry = os.path.join(targ, entry)
recursecp(srcentry, targentry)
elif os.path.exists(targ):
return
else:
shutil.copy2(source, targ)
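recursecp copies a tree without clobbering anything already present at the destination, which is how pack_image layers a base profile's files under a newly packed profile. A brief usage sketch with hypothetical profile directories:

    # Hypothetical paths; mirrors the --baseprofile handling in pack_image below.
    base = '/var/lib/confluent/public/os/sle15sp3-diskless'
    newprof = '/var/lib/confluent/public/os/sle15sp3-diskless-v2'
    recursecp(base, newprof)   # files already present in newprof are left untouched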
def pack_image(args):
outdir = args.profilename
if '/' in outdir:
raise Exception('Full path not supported, supply only the profile name')
privdir = os.path.join('/var/lib/confluent/private/os/', outdir)
outdir = os.path.join('/var/lib/confluent/public/os/', outdir)
imginfofile = os.path.join(args.scratchdir, 'etc/confluentimg.buildinfo')
distpath = None
try:
with open(imginfofile) as imginfoin:
imginfo = imginfoin.read().split('\n')
for lineinfo in imginfo:
if lineinfo.startswith('BUILDSRC='):
distpath = lineinfo.replace('BUILDSRC=', '')
except IOError:
pass
kerns = glob.glob(os.path.join(args.scratchdir, 'boot/vmlinuz-*'))
kvermap = {}
for kern in kerns:
@@ -945,26 +1028,36 @@ def pack_image(args):
encrypt_image(tmploc, os.path.join(outdir, 'rootimg.sfs'), '{}/pending/rootimg.key'.format(privdir))
os.remove(tmploc)
with open(os.path.join(outdir, 'build-info'), 'w') as buildinfo:
buildinfo.write('Packed from {} on {}\n'.format(args.scratchdir, datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')))
oshandler = fingerprint_host(args, args.scratchdir)
tryupdate = False
if oshandler:
prettyname = oshandler.osname
with open(os.path.join(args.scratchdir, 'etc/os-release')) as osr:
osrdata = osr.read().split('\n')
for line in osrdata:
if line.startswith('PRETTY_NAME="'):
prettyname = line.replace(
'PRETTY_NAME=', '').replace('"', '')
label = '{0} ({1})'.format(prettyname, 'Diskless Boot')
with open(os.path.join(outdir, 'profile.yaml'), 'w') as profiley:
profiley.write('label: {0}\nkernelargs: quiet # confluent_imagemethod=untethered|tethered\n'.format(label))
oscat = oshandler.oscategory
confdir = '/opt/confluent/lib/osdeploy/{}-diskless'.format(oscat)
os.symlink('{}/initramfs/addons.cpio'.format(confdir),
os.path.join(outdir, 'boot/initramfs/addons.cpio'))
if os.path.exists('{}/profiles/default'.format(confdir)):
copy_tree('{}/profiles/default'.format(confdir), outdir)
buildinfo.write('PACKEDFROM={}\nPACKDATE={}\n'.format(args.scratchdir, datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')))
if args.baseprofile:
buildinfo.write('BASEPROFILE={}\n'.format(args.baseprofile))
if args.baseprofile:
if '/' not in args.baseprofile:
args.baseprofile = os.path.join('/var/lib/confluent/public/os', args.baseprofile)
recursecp(args.baseprofile, outdir)
tryupdate = True
else:
if distpath:
os.symlink(distpath, os.path.join(outdir, 'distribution'))
oshandler = fingerprint_host(args, args.scratchdir)
tryupdate = False
if oshandler:
prettyname = oshandler.osname
with open(os.path.join(args.scratchdir, 'etc/os-release')) as osr:
osrdata = osr.read().split('\n')
for line in osrdata:
if line.startswith('PRETTY_NAME="'):
prettyname = line.replace(
'PRETTY_NAME=', '').replace('"', '')
label = '{0} ({1})'.format(prettyname, 'Diskless Boot')
with open(os.path.join(outdir, 'profile.yaml'), 'w') as profiley:
profiley.write('label: {0}\nkernelargs: quiet # confluent_imagemethod=untethered|tethered # tethered is default when unspecified to save on memory, untethered will use more ram, but will not have any ongoing runtime root fs dependency on the http servers.\n'.format(label))
oscat = oshandler.oscategory
confdir = '/opt/confluent/lib/osdeploy/{}-diskless'.format(oscat)
os.symlink('{}/initramfs/addons.cpio'.format(confdir),
os.path.join(outdir, 'boot/initramfs/addons.cpio'))
if os.path.exists('{}/profiles/default'.format(confdir)):
copy_tree('{}/profiles/default'.format(confdir), outdir)
tryupdate = True
try:
pwd.getpwnam('confluent')


@@ -20,6 +20,7 @@ dracut_install /lib64/libfuse.so.2 /lib64/libfuse.so.2.9.7
dracut_install chown chroot dd expr kill parted rsync sort blockdev findfs insmod lvm
dracut_install /usr/lib/udev/rules.d/10-dm.rules /usr/sbin/dmsetup /usr/lib/udev/rules.d/95-dm-notify.rules
dracut_install /usr/lib/systemd/network/99-default.link
dracut_install losetup # multipart support
#this would be nfs with lock, but not needed, go nolock
#dracut_install mount.nfs rpcbind rpc.statd /etc/netconfig sm-notify


@@ -0,0 +1,136 @@
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017-2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script demonstrates a strategy for redfish bmcs that
# dhcp to leverage the confluent switch scanning to help
# bootstrap such devices. Be aware of the uuid reformatting
# code, and determine if it is relevant for the target system
# in question. The normal thing would be to leave UUID as-is,
# but some implementations mangle it in a misunderstanding
# of 'wire format' UUID. Also, here xCAT is used as the
# 'dhcp helper', so that may need to be replaced with dnsmasq
# or direct isc dhcp code.
# Unfortunately, this is particular about the dhcp server,
# the user must know if the bmc in question mangles the uuid
# or not, and other such limitation make this difficult to blindly
# recommend, but hopefully can be useful reference material
import sys
sys.path.append('/opt/confluent/lib/python')
import confluent.client as cli
import eventlet.greenpool
import gzip
import io
import json
import os
import struct
import subprocess
import time
webclient = eventlet.import_patched('pyghmi.util.webclient')
bmcsbyuuid = {}
def checkfish(addr, mac):
wc = webclient.SecureHTTPConnection(addr, 443, verifycallback=lambda x: True)
wc.connect()
wc.request('GET', '/redfish/v1')
rsp = wc.getresponse()
body = rsp.read()
if body[:2] == b'\x1f\x8b':
body = gzip.GzipFile(fileobj=io.BytesIO(body)).read()
try:
body = json.loads(body)
except json.decoder.JSONDecodeError:
return
uuid = body.get('UUID', None)
if not uuid:
return
#This part is needed if a bmc sticks 'wire format' uuid in the json body
#Should be skipped for bmcs that present it sanely
uuidparts = uuid.split('-')
uuidparts[0] = '{:08x}'.format(struct.unpack('!I', struct.pack('<I', int(uuidparts[0], 16)))[0])
uuidparts[1] = '{:04x}'.format(struct.unpack('!H', struct.pack('<H', int(uuidparts[1], 16)))[0])
uuidparts[2] = '{:04x}'.format(struct.unpack('!H', struct.pack('<H', int(uuidparts[2], 16)))[0])
uuid = '-'.join(uuidparts)
if uuid in bmcsbyuuid:
bmcsbyuuid[uuid]['bmcs'][mac] = addr
else:
bmcsbyuuid[uuid] = {'bmcs': {mac: addr}}
if __name__ == '__main__':
gpool = eventlet.greenpool.GreenPool()
with open('/var/lib/dhcpd/dhcpd.leases', 'r') as leasefile:
leases = leasefile.read()
inlease = False
currip = None
mactoips = {}
for line in leases.split('\n'):
if line.startswith('lease '):
currip = line.split()[1]
inlease = True
continue
if not inlease:
continue
if 'hardware ethernet' in line:
currmac = line.split()[-1].replace(';', '')
mactoips[currmac] = currip
currmac = None
currip = None
inlease = False
# warm up arp tables and fdb
pings = {}
for mac in mactoips:
pings[mac] = subprocess.Popen(['ping', '-c', '1', mactoips[mac]], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
for mac in pings:
ret = pings[mac].wait()
if ret != 0:
del mactoips[mac]
c = cli.Command()
list(c.update('/networking/macs/rescan', {'rescan': 'start'}))
scanning = True
mactonode = {}
while scanning:
for rsp in c.read('/networking/macs/rescan'):
scanning = rsp.get('scanning', True)
time.sleep(0.1)
for mac in mactoips:
macinfo = list(c.read('/networking/macs/by-mac/{}'.format(mac)))
for inf in macinfo:
if inf.get('possiblenode', None):
mactonode[mac] = inf['possiblenode']
for mac in sorted(mactonode):
gpool.spawn(checkfish, mactoips[mac], mac)
gpool.waitall()
for uuid in sorted(bmcsbyuuid):
macd = bmcsbyuuid[uuid]['bmcs']
macs = sorted(macd)
currnode = None
for mac in macs:
currnode = mactonode.get(mac, None)
if currnode:
break
print('Performing: nodeattrib {} id.uuid={} custom.bmcmac={} bmc={}'.format(currnode, uuid, macs[0], macd[macs[0]]))
list(c.update('/nodes/{}/attributes/current'.format(currnode), {'id.uuid': uuid, 'custom.bmcmac': macs[0], 'bmc': macd[macs[0]]}))
subprocess.check_call(['nodeadd', currnode + '-bmc', 'mac.mac=' + macs[0]])
subprocess.check_call(['makedhcp', currnode + '-bmc'])
subprocess.check_call(['nodeboot', currnode])
subprocess.check_call(['nodebmcreset', currnode])
list(c.update('/nodes/{}/attributes/current'.format(currnode), {'bmc': currnode + '-bmc'}))
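The uuid handling in checkfish byte-swaps the first three UUID fields to undo a 'wire format' presentation, as the header comments warn. A standalone sketch of that transform on a made-up UUID:

    import struct

    def unswap_uuid(uuid):
        # Reverse the byte order of the time_low, time_mid and time_hi fields,
        # matching the struct pack/unpack sequence in checkfish.
        parts = uuid.split('-')
        parts[0] = '{:08x}'.format(struct.unpack('!I', struct.pack('<I', int(parts[0], 16)))[0])
        parts[1] = '{:04x}'.format(struct.unpack('!H', struct.pack('<H', int(parts[1], 16)))[0])
        parts[2] = '{:04x}'.format(struct.unpack('!H', struct.pack('<H', int(parts[2], 16)))[0])
        return '-'.join(parts)

    print(unswap_uuid('00112233-4455-6677-8899-aabbccddeeff'))  # made-up example
    # -> 33221100-5544-7766-8899-aabbccddeeff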

misc/setinitalpwd.py (new file)

@@ -0,0 +1,36 @@
#!/usr/bin/python2
import pyghmi.util.webclient as webclient
import json
import os
import sys
missingargs = False
if 'XCCPASS' not in os.environ:
print('Must set XCCPASS environment variable')
missingargs = True
if missingargs:
sys.exit(1)
w = webclient.SecureHTTPConnection(sys.argv[1], 443, verifycallback=lambda x: True)
w.connect()
adata = json.dumps({'username': 'USERID', 'password': 'PASSW0RD'})
headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'}
w.request('POST', '/api/login', adata, headers)
rsp = w.getresponse()
if rsp.status == 200:
rspdata = json.loads(rsp.read())
w.set_header('Content-Type', 'application/json')
w.set_header('Authorization', 'Bearer ' + rspdata['access_token'])
if '_csrf_token' in w.cookies:
w.set_header('X-XSRF-TOKEN', w.cookies['_csrf_token'])
if rspdata.get('pwchg_required', False):
print(repr(w.grab_json_response('/api/function', {'USER_UserPassChange': os.environ['XCCPASS']})))
print(repr(w.grab_json_response('/api/dataset', {
'USER_GlobalPassExpWarningPeriod': '0',
'USER_GlobalPassExpPeriod': '0',
'USER_GlobalMinPassReuseCycle': '0',
'USER_GlobalMinPassReuseCycle': '0',
'USER_GlobalMinPassChgInt': '0',
})))
#print(repr(w.grab_json_response('/api/function', {'USER_UserPassChange': '1,' + os.environ['XCCPASS']})))