Mirror of https://github.com/xcat2/confluent.git, synced 2024-11-22 01:22:00 +00:00

Merge branch 'master' of https://github.com/lenovo/confluent

Commit 9742a5d81b
@@ -72,6 +72,7 @@ if options.log:
    logreader.replay_to_console(logname)
    sys.exit(0)
#added functionality for wcons
if options.windowed:
    envstring=os.environ.get('NODECONSOLE_WINDOWED_COMMAND')
    if not envstring:
@@ -25,6 +25,8 @@ def add_lla(iface, mac):
    initbyte = int(pieces[0], 16) ^ 2
    lla = 'fe80::{0:x}{1}:{2}ff:fe{3}:{4}{5}/64'.format(initbyte, pieces[1], pieces[2], pieces[3], pieces[4], pieces[5])
    try:
        with open('/proc/sys/net/ipv6/conf/{0}/disable_ipv6'.format(iface), 'w') as setin:
            setin.write('0')
        subprocess.check_call(['ip', 'addr', 'add', 'dev', iface, lla, 'scope', 'link'])
    except Exception:
        return None
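
Note: the lla line above builds a modified EUI-64 link-local address from the MAC: the universal/local bit of the first octet is flipped (^ 2) and ff:fe is spliced between the third and fourth octets. A minimal standalone sketch of the same derivation (function name and sample MAC are illustrative, not part of the patch):

    def mac_to_lla(mac):
        # e.g. '52:54:00:12:34:56'; flip the universal/local bit of the first octet
        pieces = mac.split(':')
        initbyte = int(pieces[0], 16) ^ 2
        return 'fe80::{0:x}{1}:{2}ff:fe{3}:{4}{5}'.format(
            initbyte, pieces[1], pieces[2], pieces[3], pieces[4], pieces[5])

    # 'ip addr add' accepts the zero-padded form this produces:
    assert mac_to_lla('52:54:00:12:34:56') == 'fe80::5054:00ff:fe12:3456'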

@@ -11,11 +11,10 @@ confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done
@@ -37,4 +36,5 @@ curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLU
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid
) &
tail --pid $! -F /var/log/confluent/confluent-firstboot.log > /dev/console
@@ -11,11 +11,10 @@ confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done
@@ -37,4 +36,5 @@ curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLU
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid
) &
tail --pid $! -F /var/log/confluent/confluent-firstboot.log > /dev/console
@@ -1,5 +1,6 @@
#!/bin/sh
[ -e /tmp/confluent.initq ] && return 0
. /lib/dracut-lib.sh
udevadm trigger
udevadm trigger --type=devices --action=add
udevadm settle
@@ -96,6 +97,27 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
fi
cd /sys/class/net
if ! grep MANAGER: /etc/confluent/confluent.info; then
    confluentsrv=$(getarg confluent)
    if [ ! -z "$confluentsrv" ]; then
        if [[ "$confluentsrv" = *":"* ]]; then
            confluenthttpsrv=[$confluentsrv]
            /usr/libexec/nm-initrd-generator ip=:dhcp6
        else
            confluenthttpsrv=$confluentsrv
            /usr/libexec/nm-initrd-generator ip=:dhcp
        fi
        NetworkManager --configure-and-quit=initrd --no-daemon
        myids=uuid=$(cat /sys/devices/virtual/dmi/id/product_uuid)
        for mac in $(ip -br link|grep -v LOOPBACK|awk '{print $3}'); do
            myids=$myids"/mac="$mac
        done
        myname=$(curl -sH "CONFLUENT_IDS: $myids" https://$confluenthttpsrv/confluent-api/self/whoami)
        if [ ! -z "$myname" ]; then
            echo NODENAME: $myname > /etc/confluent/confluent.info
            echo MANAGER: $confluentsrv >> /etc/confluent/confluent.info
            echo EXTMGRINFO: $confluentsrv'||1' >> /etc/confluent/confluent.info
        fi
    fi
fi
while ! grep ^EXTMGRINFO: /etc/confluent/confluent.info | awk -F'|' '{print $3}' | grep 1 >& /dev/null && [ "$TRIES" -lt 60 ]; do
    TRIES=$((TRIES + 1))
    for currif in *; do
@@ -147,7 +169,8 @@ v4cfg=${v4cfg#ipv4_method: }
if [ "$v4cfg" = "static" ] || [ "$v4cfg" = "dhcp" ]; then
    mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg)
    mgr=${mgr#deploy_server: }
else
fi
if [ -z "$mgr" ]; then
    mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg)
    mgr=${mgr#deploy_server_v6: }
    mgr="[$mgr]"
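
The whoami exchange introduced above can be mimicked outside the initramfs. A sketch, assuming a confluent deployment server reachable over HTTPS (server address, UUID, and MAC are placeholders; verification is skipped because at this stage the initramfs has no CA bundle yet):

    import ssl
    import urllib.request

    def whoami(server, uuid, macs):
        ids = 'uuid=' + uuid
        for mac in macs:
            ids += '/mac=' + mac
        req = urllib.request.Request(
            'https://{0}/confluent-api/self/whoami'.format(server),
            headers={'CONFLUENT_IDS': ids})
        ctx = ssl._create_unverified_context()  # no CA bundle yet, as in the initramfs
        with urllib.request.urlopen(req, context=ctx) as rsp:
            return rsp.read().decode('utf8').strip()  # node name, or '' if unknown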
@@ -59,7 +59,8 @@ v4cfg=${v4cfg#ipv4_method: }
if [ "$v4cfg" = "static" ] || [ "$v4cfg" = "dhcp" ]; then
    confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server: }
else
fi
if [ -z "$confluent_mgr" ]; then
    confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server_v6: }
    confluent_mgr="[$confluent_mgr]"
@@ -77,7 +78,8 @@ v4cfg=${v4cfg#ipv4_method: }
if [ "$v4cfg" = "static" ] || [ "$v4cfg" = "dhcp" ]; then
    confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server: }
else
fi
if [ -z "$confluent_mgr" ]; then
    confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server_v6: }
    confluent_mgr="[$confluent_mgr]"
@@ -104,7 +106,8 @@ v4cfg=${v4cfg#ipv4_method: }
if [ "$v4cfg" = "static" ] || [ "$v4cfg" = "dhcp" ]; then
    confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server: }
else
fi
if [ -z "$confluent_mgr" ]; then
    confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server_v6: }
    confluent_mgr="[$confluent_mgr]"
@@ -13,7 +13,8 @@ if [ "$v4cfg" = "static" ] || [ "$v4cfg" = "dhcp" ]; then
    confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server: }
    confluent_pingtarget=$confluent_mgr
else
fi
if [ -z "$confluent_mgr" ]; then
    confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg)
    confluent_mgr=${confluent_mgr#deploy_server_v6: }
    confluent_pingtarget=$confluent_mgr
@@ -22,11 +23,10 @@ fi
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -n 0 -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_pingtarget >& /dev/null; do
    sleep 1
done
@@ -49,4 +49,5 @@ curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLU
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid
) &
tail --pid $! -n 0 -F /var/log/confluent/confluent-firstboot.log > /dev/console
@@ -11,11 +11,10 @@ confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
tail -f /var/log/confluent/confluent-firstboot.log > /dev/console &
logshowpid=$!
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done
@@ -37,4 +36,5 @@ curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLU
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran
kill $logshowpid
) &
tail --pid $! -F /var/log/confluent/confluent-firstboot.log > /dev/console
confluent_osdeploy/suse15-diskless/profiles/default/scripts/onboot.d/.gitignore (vendored, new empty file)
@@ -0,0 +1,286 @@
#!/usr/bin/python3
import subprocess
import importlib
import tempfile
import json
import os
import shutil
import pwd
import grp
from importlib.machinery import SourceFileLoader
try:
    apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
except FileNotFoundError:
    apiclient = SourceFileLoader('apiclient', '/etc/confluent/apiclient').load_module()


def partitionhostsline(line):
    comment = ''
    try:
        cmdidx = line.index('#')
        comment = line[cmdidx:]
        line = line[:cmdidx].strip()
    except ValueError:
        pass
    if not line:
        return '', [], comment
    ipaddr, names = line.split(maxsplit=1)
    names = names.split()
    return ipaddr, names, comment
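
To make the return contract concrete, an illustrative call (sample values assumed):

    partitionhostsline('10.0.0.1 n1 n1.cluster # managed')
    # -> ('10.0.0.1', ['n1', 'n1.cluster'], '# managed')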

class HostMerger(object):
    def __init__(self):
        self.byip = {}
        self.byname = {}
        self.sourcelines = []
        self.targlines = []

    def read_source(self, sourcefile):
        with open(sourcefile, 'r') as hfile:
            self.sourcelines = hfile.read().split('\n')
        while not self.sourcelines[-1]:
            self.sourcelines = self.sourcelines[:-1]
        for x in range(len(self.sourcelines)):
            line = self.sourcelines[x]
            currip, names, comment = partitionhostsline(line)
            if currip:
                self.byip[currip] = x
            for name in names:
                self.byname[name] = x

    def read_target(self, targetfile):
        with open(targetfile, 'r') as hfile:
            lines = hfile.read().split('\n')
        if not lines[-1]:
            lines = lines[:-1]
        for y in range(len(lines)):
            line = lines[y]
            currip, names, comment = partitionhostsline(line)
            if currip in self.byip:
                x = self.byip[currip]
                if self.sourcelines[x] is None:
                    # have already consumed this entry
                    continue
                self.targlines.append(self.sourcelines[x])
                self.sourcelines[x] = None
                continue
            for name in names:
                if name in self.byname:
                    x = self.byname[name]
                    if self.sourcelines[x] is None:
                        break
                    self.targlines.append(self.sourcelines[x])
                    self.sourcelines[x] = None
                    break
            else:
                self.targlines.append(line)

    def write_out(self, targetfile):
        while not self.targlines[-1]:
            self.targlines = self.targlines[:-1]
            if not self.targlines:
                break
        while not self.sourcelines[-1]:
            self.sourcelines = self.sourcelines[:-1]
            if not self.sourcelines:
                break
        with open(targetfile, 'w') as hosts:
            for line in self.targlines:
                hosts.write(line + '\n')
            for line in self.sourcelines:
                if line is not None:
                    hosts.write(line + '\n')
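
A minimal usage sketch for HostMerger (the staging path is hypothetical; synchronize() below feeds it the etc/hosts file fetched from the deployment server):

    merger = HostMerger()
    merger.read_source('/tmp/pending/etc/hosts')  # hypothetical staged copy
    merger.read_target('/etc/hosts')
    merger.write_out('/etc/hosts')

Source entries take the place of target lines sharing an IP or a name; source lines that matched nothing are appended at the end.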

class CredMerger:
    def __init__(self):
        try:
            with open('/etc/login.defs', 'r') as ldefs:
                defs = ldefs.read().split('\n')
        except FileNotFoundError:
            defs = []
        lkup = {}
        self.discardnames = {}
        self.shadowednames = {}
        for line in defs:
            try:
                line = line[:line.index('#')]
            except ValueError:
                pass
            keyval = line.split()
            if len(keyval) < 2:
                continue
            lkup[keyval[0]] = keyval[1]
        self.uidmin = int(lkup.get('UID_MIN', 1000))
        self.uidmax = int(lkup.get('UID_MAX', 60000))
        self.gidmin = int(lkup.get('GID_MIN', 1000))
        self.gidmax = int(lkup.get('GID_MAX', 60000))
        self.shadowlines = None

    def read_passwd(self, source, targfile=False):
        self.read_generic(source, self.uidmin, self.uidmax, targfile)

    def read_group(self, source, targfile=False):
        self.read_generic(source, self.gidmin, self.gidmax, targfile)

    def read_generic(self, source, minid, maxid, targfile):
        if targfile:
            self.targdata = []
        else:
            self.sourcedata = []
        with open(source, 'r') as inputfile:
            for line in inputfile.read().split('\n'):
                try:
                    name, _, uid, _ = line.split(':', 3)
                    uid = int(uid)
                except ValueError:
                    continue
                if targfile:
                    if uid < minid or uid > maxid:
                        self.targdata.append(line)
                    else:
                        self.discardnames[name] = 1
                else:
                    if name[0] in ('+', '#', '@'):
                        self.sourcedata.append(line)
                    elif uid >= minid and uid <= maxid:
                        self.sourcedata.append(line)

    def read_shadow(self, source):
        self.shadowlines = []
        try:
            with open(source, 'r') as inshadow:
                for line in inshadow.read().split('\n'):
                    try:
                        name, _ = line.split(':', 1)
                    except ValueError:
                        continue
                    if name in self.discardnames:
                        continue
                    self.shadowednames[name] = 1
                    self.shadowlines.append(line)
        except FileNotFoundError:
            return

    def write_out(self, outfile):
        with open(outfile, 'w') as targ:
            for line in self.targdata:
                targ.write(line + '\n')
            for line in self.sourcedata:
                targ.write(line + '\n')
        if outfile == '/etc/passwd':
            if self.shadowlines is None:
                self.read_shadow('/etc/shadow')
            with open('/etc/shadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name[0] in ('+', '#', '@'):
                        continue
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!:::::::\n')
        if outfile == '/etc/group':
            if self.shadowlines is None:
                self.read_shadow('/etc/gshadow')
            with open('/etc/gshadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!::\n')
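
CredMerger is driven the same way; a sketch assuming a staged passwd file (path hypothetical, mirroring the calls in synchronize() below):

    cm = CredMerger()
    cm.read_passwd('/tmp/pending/etc/passwd', targfile=False)
    cm.read_passwd('/etc/passwd', targfile=True)
    cm.write_out('/etc/passwd')  # also rewrites /etc/shadow; imported users get '!' passwords

Only IDs inside the UID_MIN..UID_MAX window from /etc/login.defs are imported, so 'system' accounts stay under local package management.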
def appendonce(basepath, filename):
    with open(filename, 'rb') as filehdl:
        thedata = filehdl.read()
    targname = filename.replace(basepath, '')
    try:
        with open(targname, 'rb') as filehdl:
            targdata = filehdl.read()
    except IOError:
        targdata = b''
    if thedata in targdata:
        return
    with open(targname, 'ab') as targhdl:
        targhdl.write(thedata)
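
An illustrative call (paths assumed):

    appendonce('/tmp/append', '/tmp/append/etc/motd')
    # appends the staged fragment to /etc/motd only if its bytes are not already
    # present there, so repeated sync runs stay idempotent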

def synchronize():
    tmpdir = tempfile.mkdtemp()
    appendoncedir = tempfile.mkdtemp()
    try:
        ac = apiclient.HTTPSClient()
        myips = []
        ipaddrs = subprocess.check_output(['ip', '-br', 'a']).split(b'\n')
        for line in ipaddrs:
            isa = line.split()
            if len(isa) < 3 or isa[1] != b'UP':
                continue
            for addr in isa[2:]:
                if addr.startswith(b'fe80::') or addr.startswith(b'169.254'):
                    continue
                addr = addr.split(b'/')[0]
                if not isinstance(addr, str):
                    addr = addr.decode('utf8')
                myips.append(addr)
        data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
        status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
        if status == 202:
            lastrsp = ''
            while status != 204:
                status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
                if not isinstance(rsp, str):
                    rsp = rsp.decode('utf8')
                if status == 200:
                    lastrsp = rsp
            pendpasswd = os.path.join(tmpdir, 'etc/passwd')
            if os.path.exists(pendpasswd):
                cm = CredMerger()
                cm.read_passwd(pendpasswd, targfile=False)
                cm.read_passwd('/etc/passwd', targfile=True)
                cm.write_out('/etc/passwd')
            pendgroup = os.path.join(tmpdir, 'etc/group')
            if os.path.exists(pendgroup):
                cm = CredMerger()
                cm.read_group(pendgroup, targfile=False)
                cm.read_group('/etc/group', targfile=True)
                cm.write_out('/etc/group')
            pendhosts = os.path.join(tmpdir, 'etc/hosts')
            if os.path.exists(pendhosts):
                cm = HostMerger()
                cm.read_source(pendhosts)
                cm.read_target('/etc/hosts')
                cm.write_out('/etc/hosts')
            for dirn in os.walk(appendoncedir):
                for filen in dirn[2]:
                    appendonce(appendoncedir, os.path.join(dirn[0], filen))
            if lastrsp:
                lastrsp = json.loads(lastrsp)
                opts = lastrsp.get('options', {})
                for fname in opts:
                    uid = -1
                    gid = -1
                    for opt in opts[fname]:
                        if opt == 'owner':
                            try:
                                uid = pwd.getpwnam(opts[fname][opt]['name']).pw_uid
                            except KeyError:
                                uid = opts[fname][opt]['id']
                        elif opt == 'group':
                            try:
                                gid = grp.getgrnam(opts[fname][opt]['name']).gr_gid
                            except KeyError:
                                gid = opts[fname][opt]['id']
                        elif opt == 'permissions':
                            os.chmod(fname, int(opts[fname][opt], 8))
                    if uid != -1 or gid != -1:
                        os.chown(fname, uid, gid)
    finally:
        shutil.rmtree(tmpdir)
        shutil.rmtree(appendoncedir)


if __name__ == '__main__':
    synchronize()
@@ -0,0 +1,29 @@
# It is advised to avoid /var/lib/confluent/public as a source for syncing. /var/lib/confluent/public
# is served without authentication and thus any sensitive content would be a risk. If wanting to host
# syncfiles on a common share, it is suggested to have /var/lib/confluent be the share and use a
# subdirectory other than public.
#
# Syncing is performed as the 'confluent' user, so all source files must be accessible by the confluent user.
#
# This file lists files to synchronize or merge to the deployed systems from the deployment server.
# To specify taking /some/path/hosts on the deployment server and duplicating it to /etc/hosts:
# Note particularly the use of '->' to delineate source from target.
# /some/path/hosts -> /etc/hosts

# If wanting to simply use the same path for source and destination, the -> may be skipped:
# /etc/hosts

# More functionality is available, for example to limit the entry to run only on n1 through n8, and to set
# owner, group, and permissions in octal notation:
# /example/source -> n1-n8:/etc/target (owner=root,group=root,permissions=600)

# Entries under APPENDONCE: will be added to the specified target, only if the target does not already
# contain the data from the source in its entirety. This allows append in a fashion that
# is friendly to being run repeatedly.

# Entries under MERGE: will attempt to be intelligently merged. This supports /etc/group and /etc/passwd.
# Any supporting entries in /etc/shadow or /etc/gshadow are added automatically, with password disabled.
# It also will not inject 'system' ids (under 1,000 usually) as those tend to be local and rpm managed.
MERGE:
# /etc/passwd
# /etc/group
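
A fuller illustration combining the sections described above (all paths hypothetical):

    /var/lib/confluent/sync/hosts -> /etc/hosts
    APPENDONCE:
    /var/lib/confluent/sync/authorized_keys -> /root/.ssh/authorized_keys (owner=root,group=root,permissions=600)
    MERGE:
    /etc/passwd
    /etc/group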
confluent_osdeploy/suse15/profiles/hpc/syncfiles (new file, 29 lines)
@@ -0,0 +1,29 @@
# It is advised to avoid /var/lib/confluent/public as a source for syncing. /var/lib/confluent/public
# is served without authentication and thus any sensitive content would be a risk. If wanting to host
# syncfiles on a common share, it is suggested to have /var/lib/confluent be the share and use a
# subdirectory other than public.
#
# Syncing is performed as the 'confluent' user, so all source files must be accessible by the confluent user.
#
# This file lists files to synchronize or merge to the deployed systems from the deployment server.
# To specify taking /some/path/hosts on the deployment server and duplicating it to /etc/hosts:
# Note particularly the use of '->' to delineate source from target.
# /some/path/hosts -> /etc/hosts

# If wanting to simply use the same path for source and destination, the -> may be skipped:
# /etc/hosts

# More functionality is available, for example to limit the entry to run only on n1 through n8, and to set
# owner, group, and permissions in octal notation:
# /example/source -> n1-n8:/etc/target (owner=root,group=root,permissions=600)

# Entries under APPENDONCE: will be added to the specified target, only if the target does not already
# contain the data from the source in its entirety. This allows append in a fashion that
# is friendly to being run repeatedly.

# Entries under MERGE: will attempt to be intelligently merged. This supports /etc/group and /etc/passwd.
# Any supporting entries in /etc/shadow or /etc/gshadow are added automatically, with password disabled.
# It also will not inject 'system' ids (under 1,000 usually) as those tend to be local and rpm managed.
MERGE:
# /etc/passwd
# /etc/group
confluent_osdeploy/suse15/profiles/server/syncfiles (new file, 29 lines)
@@ -0,0 +1,29 @@
# It is advised to avoid /var/lib/confluent/public as a source for syncing. /var/lib/confluent/public
# is served without authentication and thus any sensitive content would be a risk. If wanting to host
# syncfiles on a common share, it is suggested to have /var/lib/confluent be the share and use a
# subdirectory other than public.
#
# Syncing is performed as the 'confluent' user, so all source files must be accessible by the confluent user.
#
# This file lists files to synchronize or merge to the deployed systems from the deployment server.
# To specify taking /some/path/hosts on the deployment server and duplicating it to /etc/hosts:
# Note particularly the use of '->' to delineate source from target.
# /some/path/hosts -> /etc/hosts

# If wanting to simply use the same path for source and destination, the -> may be skipped:
# /etc/hosts

# More functionality is available, for example to limit the entry to run only on n1 through n8, and to set
# owner, group, and permissions in octal notation:
# /example/source -> n1-n8:/etc/target (owner=root,group=root,permissions=600)

# Entries under APPENDONCE: will be added to the specified target, only if the target does not already
# contain the data from the source in its entirety. This allows append in a fashion that
# is friendly to being run repeatedly.

# Entries under MERGE: will attempt to be intelligently merged. This supports /etc/group and /etc/passwd.
# Any supporting entries in /etc/shadow or /etc/gshadow are added automatically, with password disabled.
# It also will not inject 'system' ids (under 1,000 usually) as those tend to be local and rpm managed.
MERGE:
# /etc/passwd
# /etc/group
@@ -282,11 +282,17 @@ def initialize(cmdset):
    init_confluent_myname()
    certutil.create_certificate()
    if os.path.exists('/usr/lib/systemd/system/httpd.service'):
        subprocess.check_call(['systemctl', 'try-restart', 'httpd'])
        print('HTTP server has been restarted if it was running')
        try:
            subprocess.check_call(['systemctl', 'try-restart', 'httpd'])
            print('HTTP server has been restarted if it was running')
        except subprocess.CalledProcessError:
            emprint('New HTTPS certificates generated, restart the web server manually')
    elif os.path.exists('/usr/lib/systemd/system/apache2.service'):
        subprocess.check_call(['systemctl', 'try-restart', 'apache2'])
        print('HTTP server has been restarted if it was running')
        try:
            subprocess.check_call(['systemctl', 'try-restart', 'apache2'])
            print('HTTP server has been restarted if it was running')
        except subprocess.CalledProcessError:
            emprint('New HTTPS certificates generated, restart the web server manually')
    else:
        emprint('New HTTPS certificates generated, restart the web server manually')
    if cmdset.s:
@@ -124,10 +124,13 @@ node = {
    'deployment.apiarmed': {
        'description': ('Indicates whether the node authentication token interface '
                        'is armed. If set to once, it will grant only the next '
                        'request. If set to continuous, will allow many requests.'
                        'Should not be set unless an OS deployment is pending. '
                        'request. If set to continuous, will allow many requests, '
                        'which greatly reduces security, particularly when connected to '
                        'untrusted networks. '
                        'Should not be set unless an OS deployment is pending on the node. '
                        'Generally this is not directly modified, but is modified '
                        'by the "nodedeploy" command'),
        'validvalues': ('once', 'continuous', ''),
    },
    'deployment.sealedapikey': {
        'description': 'This attribute is used by some images to save a sealed '
@@ -1239,7 +1239,7 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
            operation, pathcomponents, autostrip)
    elif pathcomponents[0] == 'discovery':
        return disco.handle_api_request(
            configmanager, inputdata, operation, pathcomponents)
            configmanager, inputdata, operation, pathcomponents, pluginmap['affluent'])
    elif pathcomponents[0] == 'networking':
        return macmap.handle_api_request(
            configmanager, inputdata, operation, pathcomponents)
@@ -19,12 +19,17 @@ import confluent.netutil as netutil
import confluent.util as util
import datetime
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
import eventlet.greenpool
import hashlib
import hmac
import os
import struct
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))

# cred grant tlvs:
# 0, 0 - null
@@ -36,6 +41,60 @@ import struct
# 6, len, hmac - hmac of crypted key using shared secret for long-haul support
# 128, len, len, key - sealed key

_semitrusted = []


def read_authnets(cfgpath):
    global _semitrusted
    with open(cfgpath, 'r') as cfgin:
        _semitrusted = []
        for line in cfgin.readlines():
            line = line.split('#', 1)[0].strip()
            if '/' not in line:
                continue
            subnet, prefix = line.split('/')
            prefix = int(prefix)
            _semitrusted.append((subnet, prefix))


def watch_trusted():
    cfgpath = '/etc/confluent/auth_nets'
    if isinstance(cfgpath, bytes):
        bcfgpath = cfgpath
    else:
        bcfgpath = cfgpath.encode('utf8')
    while True:
        watcher = libc.inotify_init1(os.O_NONBLOCK)
        if not os.path.exists(cfgpath):
            with open(cfgpath, 'w') as cfgout:
                cfgout.write(
                    '# This is a list of networks in addition to local\n'
                    '# networks to allow grant of initial deployment token,\n'
                    '# when a node has deployment API armed\n')
        try:
            read_authnets(cfgpath)
        except Exception:
            eventlet.sleep(15)
            continue
        if libc.inotify_add_watch(watcher, bcfgpath, 0xcc2) <= -1:
            eventlet.sleep(15)
            continue
        select.select((watcher,), (), (), 86400)
        try:
            os.read(watcher, 1024)
        except Exception:
            pass
        os.close(watcher)
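
For reference, the 0xcc2 mask passed to inotify_add_watch above is the OR of the standard <sys/inotify.h> constants, so the loop wakes when the file is rewritten, replaced, or removed (the 86400-second select timeout adds a daily re-read regardless):

    IN_MODIFY, IN_MOVED_FROM, IN_MOVED_TO = 0x2, 0x40, 0x80
    IN_DELETE_SELF, IN_MOVE_SELF = 0x400, 0x800
    assert IN_MODIFY | IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE_SELF | IN_MOVE_SELF == 0xcc2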


def address_is_somewhat_trusted(address):
    for authnet in _semitrusted:
        if netutil.ip_on_same_subnet(address, authnet[0], authnet[1]):
            return True
    if netutil.address_is_local(address):
        return True
    return False
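
An illustrative /etc/confluent/auth_nets entry that the subnet test above would match (address range assumed):

    # lab network allowed to request deployment tokens
    10.20.0.0/16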

class CredServer(object):
    def __init__(self):
        self.cfm = cfm.ConfigManager(None)
@@ -60,7 +119,7 @@ class CredServer(object):
        elif tlv[1]:
            client.recv(tlv[1])
        if not hmackey:
            if not netutil.address_is_local(peer[0]):
            if not address_is_somewhat_trusted(peer[0]):
                client.close()
                return
        apimats = self.cfm.get_node_attributes(nodename,
@@ -424,7 +424,7 @@ def handle_autosense_config(operation, inputdata):
        stop_autosense()


def handle_api_request(configmanager, inputdata, operation, pathcomponents):
def handle_api_request(configmanager, inputdata, operation, pathcomponents, affluent=None):
    if pathcomponents == ['discovery', 'autosense']:
        return handle_autosense_config(operation, inputdata)
    if operation == 'retrieve':
@@ -435,7 +435,15 @@ def handle_api_request(configmanager, inputdata, operation, pathcomponents):
            raise exc.InvalidArgumentException()
        rescan()
        return (msg.KeyValueData({'rescan': 'started'}),)
    elif operation in ('update', 'create') and pathcomponents == ['discovery', 'remote']:
        if 'subscribe' in inputdata:
            target = inputdata['subscribe']
            affluent.subscribe_discovery(target, configmanager, collective.get_myname())
            return (msg.KeyValueData({'status': 'subscribed'}),)
        if 'unsubscribe' in inputdata:
            target = inputdata['unsubscribe']
            affluent.unsubscribe_discovery(target, configmanager, collective.get_myname())
            return (msg.KeyValueData({'status': 'unsubscribed'}),)
    elif operation in ('update', 'create'):
        if pathcomponents == ['discovery', 'register']:
            return
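
Illustrative request bodies for the new discovery/remote path (switch name assumed):

    {"subscribe": "sw1"}     # relay discovery data from affluent-running sw1
    {"unsubscribe": "sw1"}   # stop relaying and deregister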
@@ -487,6 +495,7 @@ def handle_read_api_request(pathcomponents):
        dirlist = [msg.ChildCollection(x + '/') for x in sorted(list(subcats))]
        dirlist.append(msg.ChildCollection('rescan'))
        dirlist.append(msg.ChildCollection('autosense'))
        dirlist.append(msg.ChildCollection('remote'))
        return dirlist
    if not coll:
        return show_info(queryparms['by-mac'])
@@ -855,6 +864,47 @@ def get_smm_neighbor_fingerprints(smmaddr, cv):
            continue
        yield 'sha256$' + b64tohex(neigh['sha256'])

def get_nodename_sysdisco(cfg, handler, info):
    switchname = info['forwarder_server']
    switchnode = None
    nl = cfg.filter_node_attributes('net.*switch=' + switchname)
    brokenattrs = False
    for n in nl:
        na = cfg.get_node_attributes(n, 'net.*switchport').get(n, {})
        for sp in na:
            pv = na[sp].get('value', '')
            if pv and macmap._namesmatch(info['port'], pv):
                if switchnode:
                    log.log({'error': 'Ambiguous port information between {} and {}'.format(switchnode, n)})
                    brokenattrs = True
                else:
                    switchnode = n
                break
    if brokenattrs or not switchnode:
        return None
    if 'enclosure_num' not in info:
        return switchnode
    chainlen = info['enclosure_num']
    currnode = switchnode
    while chainlen > 1:
        nl = list(cfg.filter_node_attributes('enclosure.extends=' + currnode))
        if len(nl) > 1:
            log.log({'error': 'Multiple enclosures specify extending ' + currnode})
            return None
        if len(nl) == 0:
            log.log({'error': 'No enclosures specify extending ' + currnode + ' but an enclosure seems to be extending it'})
            return None
        currnode = nl[0]
        chainlen -= 1
    if info['type'] == 'lenovo-smm2':
        return currnode
    else:
        baynum = info['bay']
        nl = cfg.filter_node_attributes('enclosure.manager=' + currnode)
        nl = list(cfg.filter_node_attributes('enclosure.bay={0}'.format(baynum), nl))
        if len(nl) == 1:
            return nl[0]

def get_nodename(cfg, handler, info):
    nodename = None
@@ -883,6 +933,10 @@ def get_nodename(cfg, handler, info):
        if not nodename and info['handler'] == pxeh:
            enrich_pxe_info(info)
            nodename = info.get('nodename', None)
        if 'forwarder_server' in info:
            # this has been registered by a remote discovery registry,
            # thus verification and specific location is fixed
            return get_nodename_sysdisco(cfg, handler, info), None
        if not nodename:
            # Ok, see if it is something with a chassis-uuid and discover by
            # chassis
@@ -219,6 +219,17 @@ class NodeHandler(bmchandler.NodeHandler):

    def config(self, nodename):
        # SMM for now has to reset to assure configuration applies
        cd = self.configmanager.get_node_attributes(
            nodename, ['secret.hardwaremanagementuser',
                       'secret.hardwaremanagementpassword',
                       'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'],
            True)
        cd = cd.get(nodename, {})
        targbmc = cd.get('hardwaremanagement.manager', {}).get('value', '')
        currip = self.ipaddr if self.ipaddr else ''
        if not currip.startswith('fe80::') and (targbmc.startswith('fe80::') or not targbmc):
            raise exc.TargetEndpointUnreachable(
                'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)')
        dpp = self.configmanager.get_node_attributes(
            nodename, 'discovery.passwordrules')
        self.ruleset = dpp.get(nodename, {}).get(
@@ -520,6 +520,16 @@ class NodeHandler(immhandler.NodeHandler):

    def config(self, nodename, reset=False):
        self.nodename = nodename
        cd = self.configmanager.get_node_attributes(
            nodename, ['secret.hardwaremanagementuser',
                       'secret.hardwaremanagementpassword',
                       'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'],
            True)
        cd = cd.get(nodename, {})
        targbmc = cd.get('hardwaremanagement.manager', {}).get('value', '')
        if not self.ipaddr.startswith('fe80::') and (targbmc.startswith('fe80::') or not targbmc):
            raise exc.TargetEndpointUnreachable(
                'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)')
        # TODO(jjohnson2): set ip parameters, user/pass, alert cfg maybe
        # In general, try to use https automation, to make it consistent
        # between hypothetical secure path and today.
@@ -541,12 +551,6 @@ class NodeHandler(immhandler.NodeHandler):
        self._setup_xcc_account(user, passwd, wc)
        wc = self.wc
        self._convert_sha256account(user, passwd, wc)
        cd = self.configmanager.get_node_attributes(
            nodename, ['secret.hardwaremanagementuser',
                       'secret.hardwaremanagementpassword',
                       'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'],
            True)
        cd = cd.get(nodename, {})
        if (cd.get('hardwaremanagement.method', {}).get('value', 'ipmi') != 'redfish'
                or cd.get('console.method', {}).get('value', None) == 'ipmi'):
            nwc = wc.dupe()
@@ -572,17 +576,13 @@ class NodeHandler(immhandler.NodeHandler):
        rsp, status = nwc.grab_json_response_with_status(
            '/redfish/v1/AccountService/Accounts/1',
            updateinf, method='PATCH')
        if ('hardwaremanagement.manager' in cd and
                cd['hardwaremanagement.manager']['value'] and
                not cd['hardwaremanagement.manager']['value'].startswith(
                    'fe80::')):
            rawnewip = cd['hardwaremanagement.manager']['value']
            newip = rawnewip.split('/', 1)[0]
        if targbmc and not targbmc.startswith('fe80::'):
            newip = targbmc.split('/', 1)[0]
            newipinfo = getaddrinfo(newip, 0)[0]
            newip = newipinfo[-1][0]
            if ':' in newip:
                raise exc.NotImplementedException('IPv6 remote config TODO')
            netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=rawnewip)
            netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=targbmc)
            newmask = netutil.cidr_to_mask(netconfig['prefix'])
        currinfo = wc.grab_json_response('/api/providers/logoninfo')
        currip = currinfo.get('items', [{}])[0].get('ipv4_address', '')
@@ -29,6 +29,7 @@ import confluent.auth as auth
import confluent.config.attributes as attribs
import confluent.config.configmanager as configmanager
import confluent.consoleserver as consoleserver
import confluent.discovery.core as disco
import confluent.forwarder as forwarder
import confluent.exceptions as exc
import confluent.log as log
@@ -591,7 +592,7 @@ def wsock_handler(ws):

def resourcehandler(env, start_response):
    try:
        if 'HTTP_SEC_WEBSOCKET_VERSION' in env:
        if 'HTTP_SEC_WEBSOCKET_VERSION' in env:
            for rsp in wsock_handler(env, start_response):
                yield rsp
        else:
@@ -622,7 +623,8 @@ def resourcehandler_backend(env, start_response):
        for res in selfservice.handle_request(env, start_response):
            yield res
        return
    if env.get('PATH_INFO', '').startswith('/booturl/by-node/'):
    reqpath = env.get('PATH_INFO', '')
    if reqpath.startswith('/boot/'):
        request = env['PATH_INFO'].split('/')
        if not request[0]:
            request = request[1:]
@@ -630,7 +632,14 @@ def resourcehandler_backend(env, start_response):
            start_response('400 Bad Request', headers)
            yield ''
            return
        nodename = request[2]
        if request[1] == 'by-mac':
            mac = request[2].replace('-', ':')
            nodename = disco.get_node_by_uuid_or_mac(mac)
        elif request[1] == 'by-uuid':
            uuid = request[2]
            nodename = disco.get_node_by_uuid_or_mac(uuid)
        elif request[1] == 'by-node':
            nodename = request[2]
        bootfile = request[3]
        cfg = configmanager.ConfigManager(None)
        nodec = cfg.get_node_attributes(nodename, 'deployment.pendingprofile')
@@ -639,7 +648,7 @@ def resourcehandler_backend(env, start_response):
            start_response('404 Not Found', headers)
            yield ''
            return
        redir = '/confluent-public/os/{0}/{1}'.format(pprofile, bootfile)
        redir = '/confluent-public/os/{0}/boot.{1}'.format(pprofile, bootfile)
        headers.append(('Location', redir))
        start_response('302 Found', headers)
        yield ''
@@ -237,6 +237,8 @@ class NetManager(object):
        if ipv6addr:
            myattribs['ipv6_method'] = 'static'
            myattribs['ipv6_address'] = ipv6addr
        else:
            myattribs['ipv6_method'] = 'dhcp'
        if attribs.get('ipv6_gateway', None) and 'ipv6_method' in myattribs:
            myattribs['ipv6_gateway'] = attribs['ipv6_gateway']
        if 'ipv4_method' not in myattribs and 'ipv6_method' not in myattribs:
@@ -16,6 +16,7 @@

import eventlet
import eventlet.queue as queue
import eventlet.green.socket as socket
import confluent.exceptions as exc
webclient = eventlet.import_patched('pyghmi.util.webclient')
import confluent.messages as msg
@@ -53,6 +54,32 @@ class WebClient(object):
        return rsp


def subscribe_discovery(node, configmanager, myname):
    creds = configmanager.get_node_attributes(
        node, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
    tsock = socket.create_connection((node, 443))
    myip = tsock.getsockname()[0]
    tsock.close()
    if ':' in myip:
        myip = '[{0}]'.format(myip)
    myurl = 'https://{0}/confluent-api/self/register_discovered'.format(myip)
    wc = WebClient(node, configmanager, creds)
    with open('/etc/confluent/tls/cacert.pem') as cain:
        cacert = cain.read()
    wc.wc.grab_json_response('/affluent/cert_authorities/{0}'.format(myname), cacert)
    res, status = wc.wc.grab_json_response_with_status('/affluent/discovery_subscribers/{0}'.format(myname), {'url': myurl, 'authname': node})
    if status == 200:
        agentkey = res['cryptkey']
        configmanager.set_node_attributes({node: {'crypted.selfapikey': {'hashvalue': agentkey}}})

def unsubscribe_discovery(node, configmanager, myname):
    creds = configmanager.get_node_attributes(
        node, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
    wc = WebClient(node, configmanager, creds)
    res, status = wc.wc.grab_json_response_with_status('/affluent/cert_authorities/{0}'.format(myname), method='DELETE')
    res, status = wc.wc.grab_json_response_with_status('/affluent/discovery_subscribers/{0}'.format(myname), method='DELETE')


def update(nodes, element, configmanager, inputdata):
    for node in nodes:
        yield msg.ConfluentNodeError(node, 'Not Implemented')
@@ -71,6 +71,22 @@ def handle_request(env, start_response):
    cfg = configmanager.ConfigManager(None)
    nodename = env.get('HTTP_CONFLUENT_NODENAME', None)
    clientip = env.get('HTTP_X_FORWARDED_FOR', None)
    if env['PATH_INFO'] == '/self/whoami':
        clientids = env.get('HTTP_CONFLUENT_IDS', None)
        if not clientids:
            start_response('400 Bad Request', [])
            yield 'Bad Request'
            return
        for ids in clientids.split('/'):
            _, v = ids.split('=', 1)
            repname = disco.get_node_by_uuid_or_mac(v)
            if repname:
                start_response('200 OK', [])
                yield repname
                return
        start_response('404 Unknown', [])
        yield ''
        return
    if env['PATH_INFO'] == '/self/registerapikey':
        crypthmac = env.get('HTTP_CONFLUENT_CRYPTHMAC', None)
        if int(env.get('CONTENT_LENGTH', 65)) > 64:
@@ -163,7 +179,7 @@ def handle_request(env, start_response):
        start_response('400 Bad Requst', [])
        yield 'Missing Path'
        return
    targurl = '/hubble/systems/by-port/{0}/webaccess'.format(rb['path'])
    targurl = '/affluent/systems/by-port/{0}/webaccess'.format(rb['path'])
    tlsverifier = util.TLSCertVerifier(cfg, nodename, 'pubkeys.tls_hardwaremanager')
    wc = webclient.SecureHTTPConnection(nodename, 443, verifycallback=tlsverifier.verify_cert)
    relaycreds = cfg.get_node_attributes(nodename, 'secret.*', decrypt=True)
@@ -187,6 +203,8 @@ def handle_request(env, start_response):
    rb['addresses'] = [(newhost, newport)]
    rb['forwarder_url'] = targurl
    rb['forwarder_server'] = nodename
    if 'bay' in rb:
        rb['enclosure.bay'] = rb['bay']
    if rb['type'] == 'lenovo-xcc':
        ssdp.check_fish(('/DeviceDescription.json', rb), newport, verify_cert)
    elif rb['type'] == 'lenovo-smm2':
@@ -496,6 +496,7 @@ class SockApi(object):
            self.start_remoteapi()
        else:
            eventlet.spawn_n(self.watch_for_cert)
        eventlet.spawn_n(credserver.watch_trusted)
        eventlet.spawn_n(self.watch_resolv)
        self.unixdomainserver = eventlet.spawn(_unixdomainhandler)
@@ -16,6 +16,16 @@ import subprocess
import sys
import tempfile
import time
import yaml
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
    sys.path.append(path)

try:
    import confluent.osimage as osimage
except ImportError:
    osimage = None

libc = ctypes.CDLL(ctypes.util.find_library('c'))
CLONE_NEWNS = 0x00020000
@@ -194,8 +204,14 @@ def capture_remote(args):
    confdir = '/opt/confluent/lib/osdeploy/{}-diskless'.format(oscat)
    os.symlink('{}/initramfs/addons.cpio'.format(confdir),
               os.path.join(outdir, 'boot/initramfs/addons.cpio'))
    if os.path.exists('{}/profiles/default'.format(confdir)):
        copy_tree('{}/profiles/default'.format(confdir), outdir)
    indir = '{}/profiles/default'.format(confdir)
    if os.path.exists(indir):
        copy_tree(indir, outdir)
        hmap = osimage.get_hashes(outdir)
        with open('{0}/manifest.yaml'.format(outdir), 'w') as yout:
            yout.write('# This manifest enables rebase to know original source of profile data and if any customizations have been done\n')
            manifestdata = {'distdir': indir, 'disthashes': hmap}
            yout.write(yaml.dump(manifestdata, default_flow_style=False))
    label = '{0} {1} ({2})'.format(finfo['name'], finfo['version'], profname)
    with open(os.path.join(outdir, 'profile.yaml'), 'w') as profileout:
        profileout.write('label: {}\n'.format(label))
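
The manifest written above would look roughly like this (hash values abbreviated, profile path assumed):

    # This manifest enables rebase to know original source of profile data and if any customizations have been done
    distdir: /opt/confluent/lib/osdeploy/el8-diskless/profiles/default
    disthashes:
      scripts/firstboot.sh: <sha256>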
@@ -1239,8 +1255,14 @@ def pack_image(args):
    confdir = '/opt/confluent/lib/osdeploy/{}-diskless'.format(oscat)
    os.symlink('{}/initramfs/addons.cpio'.format(confdir),
               os.path.join(outdir, 'boot/initramfs/addons.cpio'))
    if os.path.exists('{}/profiles/default'.format(confdir)):
        copy_tree('{}/profiles/default'.format(confdir), outdir)
    indir = '{}/profiles/default'.format(confdir)
    if os.path.exists(indir):
        copy_tree(indir, outdir)
        hmap = osimage.get_hashes(outdir)
        with open('{0}/manifest.yaml'.format(outdir), 'w') as yout:
            yout.write('# This manifest enables rebase to know original source of profile data and if any customizations have been done\n')
            manifestdata = {'distdir': indir, 'disthashes': hmap}
            yout.write(yaml.dump(manifestdata, default_flow_style=False))
    tryupdate = True
    try:
        pwd.getpwnam('confluent')