
Merge branch 'xcat2:master' into master

Commit 9ebb8f27ec by Tkucherera, 2023-05-31 17:34:52 -04:00 (committed via GitHub)
12 changed files with 706 additions and 321 deletions


@@ -229,7 +229,7 @@ if options.windowed:
elif 'Height' in line:
window_height = int(line.split(':')[1])
elif '-geometry' in line:
l = re.split(' |x|\+', line)
l = re.split(' |x|-|\+', line)
l_nosp = [ele for ele in l if ele.strip()]
wmxo = int(l_nosp[1])
wmyo = int(l_nosp[2])
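# Illustrative effect of adding '-' to the delimiter set (example geometry
# line is hypothetical): re.split(' |x|-|\+', '-geometry 80x24+9-8') yields
# ['', 'geometry', '80', '24', '9', '8'], whereas the old pattern left a
# negative offset fused as '9-8' and broke the int() conversions.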
@@ -313,4 +313,4 @@ if options.tile:
os.execlp('tmux', 'tmux', 'attach', '-t', sessname)
else:
os.execl(confettypath, confettypath, 'start',
'/nodes/{0}/console/session'.format(args[0]))
'/nodes/{0}/console/session'.format(args[0]))

confluent_client/bin/nodefreshen (new executable file, 166 lines added)

@@ -0,0 +1,166 @@
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import optparse
import os
import select
import signal
import subprocess
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
sys.path.append(path)
import confluent.client as client
import confluent.sortutil as sortutil
devnull = None
def run():
global devnull
devnull = open(os.devnull, 'rb')
argparser = optparse.OptionParser(
usage="Usage: %prog [options] noderange commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
"'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
argparser.add_option('-f', '-c', '--count', type='int', default=168,
help='Number of commands to run at a time')
argparser.add_option('-k', '--security', action='store_true',
help='Update SSH setup')
argparser.add_option('-F', '--sync', action='store_true',
help='Run the syncfiles associated with the currently completed OS profile on the noderange')
argparser.add_option('-P', '--scripts',
help='Re-run specified scripts, with full path under scripts, e.g. post.d/first,firstboot.d/second')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to run remote ssh command to, '
'prompting if over the threshold')
# among other things, FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind for the unaware on memory pressure and such...
#argparser.disable_interspersed_args()
(options, args) = argparser.parse_args()
if len(args) < 1:
argparser.print_help()
sys.exit(1)
client.check_globbing(args[0])
concurrentprocs = options.count
c = client.Command()
currprocs = 0
all = set([])
pipedesc = {}
pendingexecs = deque()
exitcode = 0
c.stop_if_noderange_over(args[0], options.maxnodes)
nodemap = {}
cmdparms = []
nodes = []
for res in c.read('/noderange/{0}/nodes/'.format(args[0])):
if 'error' in res:
sys.stderr.write(res['error'] + '\n')
exitcode |= res.get('errorcode', 1)
break
node = res['item']['href'][:-1]
nodes.append(node)
cmdstorun = []
if options.security:
cmdstorun.append(['run_remote', 'setupssh'])
if options.sync:
cmdstorun.append(['run_remote_python', 'syncfileclient'])
if options.scripts:
for script in options.scripts.split(','):
cmdstorun.append(['run_remote', script])
if not cmdstorun:
argparser.print_help()
sys.exit(1)
idxbynode = {}
cmdvbase = ['bash', '/etc/confluent/functions']
for sshnode in nodes:
idxbynode[sshnode] = 1
cmdv = ['ssh', sshnode] + cmdvbase + cmdstorun[0]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(sshnode, cmdv, all, pipedesc)
else:
pendingexecs.append((sshnode, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)
while all:
pernodeout = {}
for r in rdy:
desc = pipedesc[r]
node = desc['node']
data = True
while data and select.select([r], [], [], 0)[0]:
data = r.readline()
if data:
if desc['type'] == 'stdout':
if node not in pernodeout:
pernodeout[node] = []
pernodeout[node].append(data)
else:
data = client.stringify(data)
sys.stderr.write('{0}: {1}'.format(node, data))
sys.stderr.flush()
else:
pop = desc['popen']
ret = pop.poll()
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
r.close()
if desc['type'] == 'stdout':
if idxbynode[node] < len(cmdstorun):
cmdv = ['ssh', node] + cmdvbase + cmdstorun[idxbynode[node]]
idxbynode[node] += 1
run_cmdv(node, cmdv, all, pipedesc)
elif pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
for node in sortutil.natural_sort(pernodeout):
for line in pernodeout[node]:
line = client.stringify(line)
line = line.lstrip('\x08')
sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()
if all:
rdy, _, _ = select.select(all, [], [], 10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
nopen = subprocess.Popen(
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
all.add(nopen.stdout)
all.add(nopen.stderr)
if __name__ == '__main__':
run()
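Based on the option definitions above, typical invocations would look like the following (noderange names here are hypothetical):

nodefreshen -k compute                                 # redo SSH setup
nodefreshen -F compute                                 # re-run OS profile syncfiles
nodefreshen -P post.d/first,firstboot.d/second n1-n4   # re-run specific scripts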


@@ -21,6 +21,10 @@ class InvalidApiKey(Exception):
def msg_align(len):
return (len + 3) & ~3
try:
_protoparm = ssl.PROTOCOL_TLS_CLIENT
except AttributeError:
_protoparm = ssl.PROTOCOL_SSLv23
cryptname = ctypes.util.find_library('crypt')
if not cryptname:
if os.path.exists('/lib64/libcrypt.so.2') or os.path.exists('/usr/lib64/libcrypt.so.2'):
@@ -305,7 +309,7 @@ class HTTPSClient(client.HTTPConnection, object):
def check_connections(self):
foundsrv = None
hosts = self.hosts
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx = ssl.SSLContext(_protoparm)
ctx.load_verify_locations('/etc/confluent/ca.pem')
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
@@ -353,11 +357,12 @@ class HTTPSClient(client.HTTPConnection, object):
raise Exception('Unable to reach any hosts')
return foundsrv
def connect(self):
def connect(self, tries=3):
addrinf = socket.getaddrinfo(self.host, self.port)[0]
psock = socket.socket(addrinf[0])
psock.settimeout(15)
psock.connect(addrinf[4])
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx = ssl.SSLContext(_protoparm)
ctx.load_verify_locations('/etc/confluent/ca.pem')
host = self.host.split('%', 1)[0]
if '[' not in host and ':' in host:
@@ -375,6 +380,10 @@ class HTTPSClient(client.HTTPConnection, object):
self.errout.write(errmsg)
self.errout.flush()
sys.exit(1)
except socket.timeout:
if not tries:
raise
return self.connect(tries=tries-1)
def grab_url(self, url, data=None, returnrsp=False):
return self.grab_url_with_status(url, data, returnrsp)[1]
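A note on the _protoparm fallback above: ssl.PROTOCOL_TLS_CLIENT (Python 3.6+) enables hostname checking and certificate verification by default, while PROTOCOL_SSLv23 does not, which is why the code still configures verification explicitly so both paths behave the same. A minimal sketch:

import ssl
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# these defaults hold only for PROTOCOL_TLS_CLIENT:
assert ctx.check_hostname and ctx.verify_mode == ssl.CERT_REQUIRED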


@@ -1,6 +1,7 @@
#!/bin/sh
[ -e /tmp/confluent.initq ] && return 0
. /lib/dracut-lib.sh
setsid sh -c 'exec bash <> /dev/tty2 >&0 2>&1' &
udevadm trigger
udevadm trigger --type=devices --action=add
udevadm settle
@@ -55,21 +56,49 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
sed -n '/^net_cfgs:/, /^[^- ]/{/^[^- ]/!p}' cnflnt.yml |sed -n '/^-/, /^-/{/^-/!p}'| sed -e 's/^[- ]*//'> $tcfg
autoconfigmethod=$(grep ^ipv4_method: $tcfg)
autoconfigmethod=${autoconfigmethod#ipv4_method: }
if [ "$autoconfigmethod" = "dhcp" ]; then
/usr/libexec/nm-initrd-generator ip=:dhcp
else
v4addr=$(grep ^ipv4_address: $tcfg)
v4addr=${v4addr#ipv4_address: }
v4addr=${v4addr%/*}
v4gw=$(grep ^ipv4_gateway: $tcfg)
v4gw=${v4gw#ipv4_gateway: }
if [ "$v4gw" = "null" ]; then
v4gw=""
for i in /sys/class/net/*; do
ip link set $(basename $i) down
udevadm info $i | grep ID_NET_DRIVER=cdc_ether > /dev/null && continue
ip link set $(basename $i) up
done
for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do
if [ "$autoconfigmethod" = "dhcp" ]; then
/usr/libexec/nm-initrd-generator ip=$NICGUESS:dhcp
else
v4addr=$(grep ^ipv4_address: $tcfg)
v4addr=${v4addr#ipv4_address: }
v4plen=${v4addr#*/}
v4addr=${v4addr%/*}
v4gw=$(grep ^ipv4_gateway: $tcfg)
v4gw=${v4gw#ipv4_gateway: }
ip addr add dev $NICGUESS $v4addr/$v4plen
if [ "$v4gw" = "null" ]; then
v4gw=""
fi
if [ ! -z "$v4gw" ]; then
ip route add default via $v4gw
fi
v4nm=$(grep ipv4_netmask: $tcfg)
v4nm=${v4nm#ipv4_netmask: }
DETECTED=0
for dsrv in $deploysrvs; do
if curl --capath /tls/ -s --connect-timeout 3 https://$dsrv/confluent-public/ > /dev/null; then
rm /run/NetworkManager/system-connections/*
/usr/libexec/nm-initrd-generator ip=$v4addr::$v4gw:$v4nm:$hostname:$NICGUESS:none
DETECTED=1
ifname=$NICGUESS
break
fi
done
if [ ! -z "$v4gw" ]; then
ip route del default via $v4gw
fi
ip addr flush dev $NICGUESS
if [ $DETECTED = 1 ]; then
break
fi
fi
v4nm=$(grep ipv4_netmask: $tcfg)
v4nm=${v4nm#ipv4_netmask: }
/usr/libexec/nm-initrd-generator ip=$v4addr::$v4gw:$v4nm:$hostname::none
fi
done
NetworkManager --configure-and-quit=initrd --no-daemon
hmackeyfile=/tmp/cnflnthmackeytmp
echo -n $(grep ^apitoken: cnflnt.yml|awk '{print $2}') > $hmackeyfile
@@ -85,10 +114,10 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
echo 'MANAGER: '$deploysrv >> /etc/confluent/confluent.info
done
for deployer in $deploysrvs; do
if curl -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_CRYPTHMAC: $(cat $hmacfile)" -d@$passcrypt -k https://$deployer/confluent-api/self/registerapikey; then
if curl --capath /tls/ -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_CRYPTHMAC: $(cat $hmacfile)" -d@$passcrypt -k https://$deployer/confluent-api/self/registerapikey; then
cp $passfile /etc/confluent/confluent.apikey
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$deployer/confluent-api/self/deploycfg2 > /etc/confluent/confluent.deploycfg
curl --capath /tls/ -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$deployer/confluent-api/self/deploycfg2 > /etc/confluent/confluent.deploycfg
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg)
confluent_profile=${confluent_profile#profile: }
mgr=$deployer
@@ -114,7 +143,7 @@ if ! grep MANAGER: /etc/confluent/confluent.info; then
for mac in $(ip -br link|grep -v LOOPBACK|awk '{print $3}'); do
myids=$myids"/mac="$mac
done
myname=$(curl -sH "CONFLUENT_IDS: $myids" https://$confluenthttpsrv/confluent-api/self/whoami)
myname=$(curl --capath /tls/ -sH "CONFLUENT_IDS: $myids" https://$confluenthttpsrv/confluent-api/self/whoami)
if [ ! -z "$myname" ]; then
echo NODENAME: $myname > /etc/confluent/confluent.info
echo MANAGER: $confluentsrv >> /etc/confluent/confluent.info
@@ -149,14 +178,16 @@ while ! confluentpython /opt/confluent/bin/apiclient $errout /confluent-api/self
sleep 10
done
ifidx=$(cat /tmp/confluent.ifidx 2> /dev/null)
if [ ! -z "$ifidx" ]; then
ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
ifname=${ifname%:}
ifname=${ifname%@*}
echo $ifname > /tmp/net.ifaces
else
ip -br a|grep UP|awk '{print $1}' > /tmp/net.ifaces
ifname=$(cat /tmp/net.ifaces)
if [ -z "$ifname" ]; then
if [ ! -z "$ifidx" ]; then
ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
ifname=${ifname%:}
ifname=${ifname%@*}
echo $ifname > /tmp/net.ifaces
else
ip -br a|grep UP|awk '{print $1}' > /tmp/net.ifaces
ifname=$(cat /tmp/net.ifaces)
fi
fi
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
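For reference, the ip= argument handed to nm-initrd-generator in the loop above follows the dracut syntax ip=<address>::<gateway>:<netmask>:<hostname>:<interface>:<method>, so a probe of a hypothetical NIC expands to something like:

ip=10.1.2.30::10.1.2.1:255.255.255.0:node1:eth0:none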


@@ -161,7 +161,7 @@ else
echo -e "BOOTPROTO='static'\nSTARTMODE='auto'" >> /run/confluent/ifcfg-$ifname
echo "IPADDR='$v4addr/$v4nm'" >> /run/confluent/ifcfg-$ifname
if [ ! -z "$v4gw" ]; then
echo defafult $v4gw - $ifname > /run/confluent/ifroute-$ifname
echo default $v4gw - $ifname > /run/confluent/ifroute-$ifname
fi
fi
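The corrected route entry uses wicked's ifroute-<interface> file format (destination gateway netmask interface, with '-' as a placeholder), e.g. with hypothetical values:

default 10.1.2.1 - eth0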


@@ -262,12 +262,13 @@ if __name__ == '__main__':
macok = True
#adding new code to check if the response is something like net.<something>switch
for key in rsp:
if key.startswith('net.')and key.endswith('switch'):
if ((key.startswith('net.') and key.endswith('switch'))
or (key.startswith('power.') and key.endswith('pdu'))
or (key.startswith('enclosure.') and key.endswith('manager'))
):
switch_value = rsp[key].get('value',None)
if switch_value in valid_nodes:
print(f'{switch_value} is valid.')
else:
emprint(f'{switch_value} is not valid.')
if switch_value and switch_value not in valid_nodes:
emprint(f'{switch_value} is not a valid node name (as referenced by attribute "{key}" of node {args.node}).')
if not uuidok and not macok:
allok = False
emprint(f'{args.node} does not have a uuid or mac address defined in id.uuid or net.*hwaddr, deployment will not work')
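With the widened check, an attribute such as power.psu1.pdu or enclosure.manager that names an undefined node now triggers the same warning as a bad switch reference, e.g. (hypothetical names):

pdu1 is not a valid node name (as referenced by attribute "power.psu1.pdu" of node n1).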


@@ -66,6 +66,7 @@ import confluent.config.configmanager as cfm
import confluent.collective.manager as collective
import confluent.discovery.protocols.pxe as pxe
import confluent.discovery.protocols.ssdp as ssdp
#import confluent.discovery.protocols.mdns as mdns
import confluent.discovery.protocols.slp as slp
import confluent.discovery.handlers.imm as imm
import confluent.discovery.handlers.cpstorage as cpstorage
@@ -117,6 +118,7 @@ nodehandlers = {
'onie-switch': None,
'cumulus-switch': None,
'affluent-switch': None,
#'openbmc': None,
'service:io-device.Lenovo:management-module': None,
'service:thinkagile-storage': cpstorage,
'service:lenovo-tsm': tsm,
@@ -130,6 +132,7 @@ servicenames = {
'service:lenovo-smm2': 'lenovo-smm2',
'affluent-switch': 'affluent-switch',
'lenovo-xcc': 'lenovo-xcc',
#'openbmc': 'openbmc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
'service:io-device.Lenovo:management-module': 'lenovo-switch',
'service:thinkagile-storage': 'thinkagile-storagebmc',
@@ -1631,6 +1634,7 @@ def stop_autosense():
def start_autosense():
autosensors.add(eventlet.spawn(slp.snoop, safe_detected, slp))
#autosensors.add(eventlet.spawn(mdns.snoop, safe_detected, mdns))
autosensors.add(eventlet.spawn(pxe.snoop, safe_detected, pxe, get_node_guess_by_uuid))
remotescan()


@@ -48,25 +48,7 @@ webclient = eventlet.import_patched('pyghmi.util.webclient')
mcastv4addr = '224.0.0.251'
mcastv6addr = 'ff02::fb'
ssdp6mcast = socket.inet_pton(socket.AF_INET6, mcastv6addr)
def oldactive_scan(handler, protocol=None):
known_peers = set([])
for scanned in scan(['urn:dmtf-org:service:redfish-rest:1', 'urn::service:affluent']):
for addr in scanned['addresses']:
if addr in known_peers:
break
hwaddr = neighutil.get_hwaddr(addr[0])
if not hwaddr:
continue
if not scanned.get('hwaddr', None):
scanned['hwaddr'] = hwaddr
known_peers.add(addr)
else:
scanned['protocol'] = protocol
handler(scanned)
mdns6mcast = socket.inet_pton(socket.AF_INET6, mcastv6addr)
def name_to_qname(name):
nameparts = name.split('.')
@@ -76,59 +58,50 @@ def name_to_qname(name):
qname += bytes(bytearray([len(namepart)])) + namepart
qname += b'\x00'
return qname
PTR = 12
SRV = 33
listsrvs = struct.pack('!HHHHHH',
def _makequery(name):
return struct.pack('!HHHHHH',
0, # transaction id
0, # flags, standard query
1, # query count
0, # answers
0, # authorities
0) + \
name_to_qname('_services._dns-sd._udp.local') + \
name_to_qname(name) + \
struct.pack('!HH',
12, # PTR
(1 << 15) | 1)
def scan(services, target=None):
pass
PTR,
(1 << 15) | 1) # Unicast response
#listsrvs = _makequery('_services._dns-sd._udp.local') # to get all possible services
listobmccons = _makequery('_obmc_console._tcp.local')
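# For reference, _makequery('_obmc_console._tcp.local') produces standard DNS
# wire format (as built by name_to_qname above): a 12-byte header (txid=0,
# flags=0, qdcount=1, an/ns/ar counts 0), the length-prefixed qname
# \x0d_obmc_console\x04_tcp\x05local\x00, then QTYPE=12 (PTR) and
# QCLASS=0x8001 (class IN with the unicast-response bit set).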
def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler):
if mac in peerbymacaddress and peer not in peerbymacaddress[mac]['addresses']:
peerbymacaddress[mac]['addresses'].append(peer)
else:
sdata = _mdns_to_dict(rsp)
if not sdata:
return 0
peerdata = {
'hwaddr': mac,
'addresses': [peer],
'services': ['openbmc'],
'urls': '/redfish/v1/'
}
for headline in rsp[1:]:
if not headline:
continue
headline = util.stringify(headline)
header, _, value = headline.partition(':')
header = header.strip()
value = value.strip()
if header == 'NT':
if 'redfish-rest' not in value:
return
elif header == 'NTS':
if value == 'ssdp:byebye':
handler = byehandler
elif value != 'ssdp:alive':
handler = None
elif header == 'AL':
if not value.endswith('/redfish/v1/'):
return
elif header == 'LOCATION':
if not value.endswith('/DeviceDescription.json'):
return
if sdata.get('ttl', 0) == 0:
if byehandler:
eventlet.spawn_n(check_fish_handler, byehandler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer)
return 1
if handler:
eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer)
return 2
def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer):
retdata = check_fish(('/DeviceDescription.json', peerdata))
retdata = check_fish(('/redfish/v1/', peerdata))
if retdata:
known_peers.add(peer)
newmacs.add(mac)
@@ -137,7 +110,7 @@ def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress
def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
"""Watch for SSDP notify messages
"""Watch for unsolicited mDNS answers
The handler shall be called on any service coming online.
byehandler is called whenever a system advertises that it is departing.
@@ -152,6 +125,7 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
# Normally, I like using v6/v4 agnostic socket. However, since we are
# dabbling in multicast wizardry here, such sockets can cause big problems,
# so we will have two distinct sockets
# TTL=0 is a withdrawal, otherwise an announce
tracelog = log.Logger('trace')
net4, net6 = get_sockets()
net6.bind(('', 5353))
@@ -163,22 +137,19 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
event=log.Events.stacktrace)
known_peers = set([])
recent_peers = set([])
init_sockets()
for ifidx in util.list_interface_indexes():
v6grp = ssdp6mcast + struct.pack('=I', ifidx)
v6grp = mdns6mcast + struct.pack('=I', ifidx)
net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, v6grp)
for i4 in util.list_ips():
ssdp4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
mdns4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
socket.inet_aton(i4['addr'])
try:
net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
ssdp4mcast)
mdns4mcast)
except socket.error as e:
if e.errno != 98:
# errno 98 can happen if aliased, skip for now
raise
peerbymacaddress = {}
while True:
try:
@@ -192,103 +163,20 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
while r and len(deferrednotifies) < 256:
for s in r:
(rsp, peer) = s.recvfrom(9000)
if rsp[:4] == b'PING':
continue
if peer in recent_peers:
continue
rsp = rsp.split(b'\r\n')
if b' ' not in rsp[0]:
continue
method, _ = rsp[0].split(b' ', 1)
if method == b'NOTIFY':
if peer in known_peers:
mac = neighutil.get_hwaddr(peer[0])
if not mac:
probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:]
try:
s.sendto(b'\x00', probepeer)
except Exception:
continue
deferrednotifies.append((peer, rsp))
datum = _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler)
if datum == 2:
recent_peers.add(peer)
mac = neighutil.get_hwaddr(peer[0])
if mac == False:
# neighutil determined peer ip is not local, skip attempt
# to probe and critically, skip growing deferrednotifiers
continue
if not mac:
probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:]
try:
s.sendto(b'\x00', probepeer)
except Exception:
continue
deferrednotifies.append((peer, rsp))
continue
_process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler)
elif method == b'M-SEARCH':
if not uuidlookup:
continue
#ip = peer[0].partition('%')[0]
for headline in rsp[1:]:
if not headline:
continue
headline = util.stringify(headline)
headline = headline.partition(':')
if len(headline) < 3:
continue
forcereply = False
if headline[0] == 'ST' and headline[-1].startswith(' urn:xcat.org:service:confluent:'):
try:
cfm.check_quorum()
except Exception:
continue
for query in headline[-1].split('/'):
node = None
if query.startswith('confluentuuid='):
myuuid = cfm.get_global('confluent_uuid')
curruuid = query.split('=', 1)[1].lower()
if curruuid != myuuid:
break
forcereply = True
elif query.startswith('allconfluent=1'):
reply = 'HTTP/1.1 200 OK\r\n\r\nCONFLUENT: PRESENT\r\n'
if not isinstance(reply, bytes):
reply = reply.encode('utf8')
s.sendto(reply, peer)
elif query.startswith('uuid='):
curruuid = query.split('=', 1)[1].lower()
node = uuidlookup(curruuid)
elif query.startswith('mac='):
currmac = query.split('=', 1)[1].lower()
node = uuidlookup(currmac)
if node:
cfg = cfm.ConfigManager(None)
cfd = cfg.get_node_attributes(
node, ['deployment.pendingprofile', 'collective.managercandidates'])
if not forcereply:
# Do not bother replying to a node that
# we have no deployment activity
# planned for
if not cfd.get(node, {}).get(
'deployment.pendingprofile', {}).get('value', None):
break
candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None)
if candmgrs:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
if collective.get_myname() not in candmgrs:
break
currtime = time.time()
seconds = int(currtime)
msecs = int(currtime * 1000 % 1000)
reply = 'HTTP/1.1 200 OK\r\nNODENAME: {0}\r\nCURRTIME: {1}\r\nCURRMSECS: {2}\r\n'.format(node, seconds, msecs)
if '%' in peer[0]:
iface = socket.getaddrinfo(peer[0], 0, socket.AF_INET6, socket.SOCK_DGRAM)[0][-1][-1]
reply += 'MGTIFACE: {0}\r\n'.format(
peer[0].split('%', 1)[1])
ncfg = netutil.get_nic_config(
cfg, node, ifidx=iface)
if ncfg.get('matchesnodename', None):
reply += 'DEFAULTNET: 1\r\n'
elif not netutil.address_is_local(peer[0]):
continue
if not isinstance(reply, bytes):
reply = reply.encode('utf8')
s.sendto(reply, peer)
break
r = select.select((net4, net6), (), (), 0.2)
r = select.select((net4, net6), (), (), 1.5)
if r:
r = r[0]
if deferrednotifies:
@@ -330,7 +218,7 @@ def active_scan(handler, protocol=None):
net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,
idx)
try:
net6.sendto(listsrvs, (mcastv6addr, 5353, 0, 0))
net6.sendto(listobmccons, (mcastv6addr, 5353, 0, 0))
except socket.error:
# ignore interfaces without ipv6 multicast causing error
pass
@@ -341,7 +229,7 @@ def active_scan(handler, protocol=None):
net4.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
socket.inet_aton(addr))
try:
net4.sendto(listsrvs, (mcastv4addr, 5353))
net4.sendto(listobmccons, (mcastv4addr, 5353))
except socket.error as se:
if se.errno != 101 and se.errno != 1:
raise
@@ -360,8 +248,7 @@ def active_scan(handler, protocol=None):
continue
deferparse.append((rsp, peer))
continue
_parse_mdns(peer, rsp, peerdata)
#TODO_parse_ssdp(peer, rsp, peerdata)
_parse_mdns(peer, rsp, peerdata, '_obmc_console._tcp.local')
timeout = deadline - util.monotonic_time()
if timeout < 0:
timeout = 0
@@ -370,35 +257,13 @@ def active_scan(handler, protocol=None):
eventlet.sleep(2.2)
for dp in deferparse:
rsp, peer = dp
_parse_mdns(peer, rsp, peerdata)
#_parse_ssdp(peer, rsp, peerdata)
handler(peerdata)
return
_parse_mdns(peer, rsp, peerdata, '_obmc_console._tcp.local')
querypool = gp.GreenPool()
pooltargs = []
for nid in peerdata:
if peerdata[nid].get('services', [None])[0] == 'urn::service:affluent:1':
peerdata[nid]['attributes'] = {
'type': 'affluent-switch',
}
peerdata[nid]['services'] = ['affluent-switch']
mya = peerdata[nid]['attributes']
usn = peerdata[nid]['usn']
idinfo = usn.split('::')
for idi in idinfo:
key, val = idi.split(':', 1)
if key == 'uuid':
peerdata[nid]['uuid'] = val
elif key == 'serial':
mya['enclosure-serial-number'] = [val]
elif key == 'model':
mya['enclosure-machinetype-model'] = [val]
return peerdata[nid]
continue
if '/redfish/v1/' not in peerdata[nid].get('urls', ()) and '/redfish/v1' not in peerdata[nid].get('urls', ()):
continue
if '/DeviceDescription.json' in peerdata[nid]['urls']:
pooltargs.append(('/DeviceDescription.json', peerdata[nid]))
pooltargs.append(('/redfish/v1/', peerdata[nid]))
# For now, don't interrogate generic redfish bmcs
# This is due to a need to deduplicate from some supported SLP
# targets (IMM, TSM, others)
@@ -408,7 +273,7 @@ def active_scan(handler, protocol=None):
# pooltargs.append(('/redfish/v1/', peerdata[nid]))
for pi in querypool.imap(check_fish, pooltargs):
if pi is not None:
return pi
handler(pi)
def check_fish(urldata, port=443, verifycallback=None):
if not verifycallback:
@@ -435,7 +300,10 @@ def check_fish(urldata, port=443, verifycallback=None):
peerinfo = wc.grab_json_response('/redfish/v1/')
if url == '/redfish/v1/':
if 'UUID' in peerinfo:
data['services'] = ['service:redfish-bmc']
if 'services' not in data:
data['services'] = ['service:redfish-bmc']
else:
data['services'].append('service:redfish-bmc')
data['uuid'] = peerinfo['UUID'].lower()
return data
return None
@@ -452,6 +320,9 @@ def extract_qname(view, reply):
name += extract_qname(reply[view[1]:], reply)[1] + '.'
view = view[2:]
idx += 1
if name:
return idx, name[:-1]
return idx, ''
else:
name += view[1:currlen + 1].tobytes().decode('utf8') + '.'
view = view[currlen + 1:]
@@ -468,7 +339,43 @@ def extract_qname(view, reply):
def _parse_mdns(peer, rsp, peerdata):
def _mdns_to_dict(rsp):
txid, flags, quests, answers, arr, morerr = struct.unpack('!HHHHHH', rsp[:12])
rv = memoryview(rsp[12:])
rspv = memoryview(rsp)
retval = {}
while quests:
idx, name = extract_qname(rv, rspv)
rv = rv[idx:]
typ, dclass = struct.unpack('!HH', rv[:4])
quests -= 1
rv = rv[4:]
while answers:
idx, name = extract_qname(rv, rspv)
rv = rv[idx:]
typ, dclass, ttl, dlen = struct.unpack('!HHIH', rv[:10])
if 0 and typ == 12: # PTR, we don't need for now...
adata = extract_qname(rv[10:], rspv)
if 'ptrs' not in retval:
retval['ptrs'] = [{'name': adata, 'ttl': ttl}]
else:
retval['ptrs'].append({'name': adata, 'ttl': ttl})
if typ == 33:
portnum = struct.unpack('!H', rv[14:16])[0]
retval['protoname'] = name.split('.', 1)[1]
retval['portnumber'] = portnum
retval['ttl'] = ttl
rv = rv[dlen + 10:]
answers -= 1
return retval
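# Illustrative return value for an _obmc_console SRV answer (numbers are
# hypothetical): {'protoname': '_obmc_console._tcp.local', 'portnumber': 2200,
# 'ttl': 4500}. A ttl of 0 marks a withdrawal, which callers route to the
# byehandler.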
def _parse_mdns(peer, rsp, peerdata, srvname):
parsed = _mdns_to_dict(rsp)
if not parsed:
return
if parsed.get('ttl', 0) == 0:
return
nid = peer[0]
mac = neighutil.get_hwaddr(peer[0])
if mac:
@@ -481,28 +388,13 @@ def _parse_mdns(peer, rsp, peerdata):
peerdatum = {
'addresses': [peer],
'hwaddr': mac,
'services': []
'services': [srvname]
}
if srvname == '_obmc_console._tcp.local':
peerdatum['services'] = ['openbmc']
peerdatum['urls'] = ['/redfish/v1/']
peerdata[nid] = peerdatum
txid, flags, quests, answers, arr, morerr = struct.unpack('!HHHHHH', rsp[:12])
rv = memoryview(rsp[12:])
rspv = memoryview(rsp)
while quests:
idx, name = extract_qname(rv, rspv)
rv = rv[idx:]
typ, dclass = struct.unpack('!HH', rv[:4])
quests -= 1
rv = rv[4:]
while answers:
idx, name = extract_qname(rv, rspv)
rv = rv[idx:]
typ, dclass, ttl, dlen = struct.unpack('!HHIH', rv[:10])
if typ == 12: # PTR
adata = extract_qname(rv[10:], rspv)
if adata[1] not in peerdatum['services']:
peerdatum['services'].append(adata[1])
rv = rv[dlen + 10:]
answers -= 1
def _parse_ssdp(peer, rsp, peerdata):
nid = peer[0]
@@ -554,4 +446,4 @@ from pprint import pprint
if __name__ == '__main__':
def printit(rsp):
print(repr(rsp))
active_scan(pprint)
snoop(pprint)


@@ -297,92 +297,96 @@ def proxydhcp(handler, nodeguess):
msg.msg_namelen = ctypes.sizeof(clientaddr)
cfg = cfm.ConfigManager(None)
while True:
ready = select.select([net4011], [], [], None)
if not ready or not ready[0]:
continue
i = recvmsg(net4011.fileno(), ctypes.pointer(msg), 0)
#nb, client = net4011.recvfrom_into(rq)
if i < 240:
continue
rqv = memoryview(rq)[:i]
client = (ipfromint(clientaddr.sin_addr.s_addr), socket.htons(clientaddr.sin_port))
_, level, typ = struct.unpack('QII', cmsgarr[:16])
if level == socket.IPPROTO_IP and typ == IP_PKTINFO:
idx, recv = struct.unpack('II', cmsgarr[16:24])
recv = ipfromint(recv)
try:
optidx = rqv.tobytes().index(b'\x63\x82\x53\x63') + 4
except ValueError:
continue
hwlen = rq[2]
opts, disco = opts_to_dict(rq, optidx, 3)
disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rq[28:28+hwlen]])
node = None
if disco.get('hwaddr', None) in macmap:
node = macmap[disco['hwaddr']]
elif disco.get('uuid', None) in uuidmap:
node = uuidmap[disco['uuid']]
myipn = myipbypeer.get(rqv[28:28+hwlen].tobytes(), None)
skiplogging = True
netaddr = disco['hwaddr']
if time.time() > ignoredisco.get(netaddr, 0) + 90:
skiplogging = False
ignoredisco[netaddr] = time.time()
if not myipn:
info = {'hwaddr': netaddr, 'uuid': disco['uuid'],
'architecture': disco['arch'],
'netinfo': {'ifidx': idx, 'recvip': recv},
'services': ('pxe-client',)}
if not skiplogging:
handler(info)
if not node:
if not myipn and not skiplogging:
log.log(
{'info': 'No node matches boot attempt from uuid {0} or hardware address {1}'.format(
disco.get('uuid', 'unknown'), disco.get('hwaddr', 'unknown')
)})
continue
profile = None
if not myipn:
myipn = socket.inet_aton(recv)
profile = get_deployment_profile(node, cfg)
if profile:
log.log({
'info': 'Offering proxyDHCP boot from {0} to {1} ({2})'.format(recv, node, client[0])})
else:
ready = select.select([net4011], [], [], None)
if not ready or not ready[0]:
continue
i = recvmsg(net4011.fileno(), ctypes.pointer(msg), 0)
#nb, client = net4011.recvfrom_into(rq)
if i < 240:
continue
rqv = memoryview(rq)[:i]
client = (ipfromint(clientaddr.sin_addr.s_addr), socket.htons(clientaddr.sin_port))
_, level, typ = struct.unpack('QII', cmsgarr[:16])
if level == socket.IPPROTO_IP and typ == IP_PKTINFO:
idx, recv = struct.unpack('II', cmsgarr[16:24])
recv = ipfromint(recv)
try:
optidx = rqv.tobytes().index(b'\x63\x82\x53\x63') + 4
except ValueError:
continue
hwlen = rq[2]
opts, disco = opts_to_dict(rq, optidx, 3)
disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rq[28:28+hwlen]])
node = None
if disco.get('hwaddr', None) in macmap:
node = macmap[disco['hwaddr']]
elif disco.get('uuid', None) in uuidmap:
node = uuidmap[disco['uuid']]
myipn = myipbypeer.get(rqv[28:28+hwlen].tobytes(), None)
skiplogging = True
netaddr = disco['hwaddr']
if time.time() > ignoredisco.get(netaddr, 0) + 90:
skiplogging = False
ignoredisco[netaddr] = time.time()
if not myipn:
info = {'hwaddr': netaddr, 'uuid': disco['uuid'],
'architecture': disco['arch'],
'netinfo': {'ifidx': idx, 'recvip': recv},
'services': ('pxe-client',)}
if not skiplogging:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)})
handler(info)
if not node:
if not myipn and not skiplogging:
log.log(
{'info': 'No node matches boot attempt from uuid {0} or hardware address {1}'.format(
disco.get('uuid', 'unknown'), disco.get('hwaddr', 'unknown')
)})
continue
if opts.get(77, None) == b'iPXE':
if not profile:
profile = None
if not myipn:
myipn = socket.inet_aton(recv)
profile = get_deployment_profile(node, cfg)
if not profile:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)})
if profile:
log.log({
'info': 'Offering proxyDHCP boot from {0} to {1} ({2})'.format(recv, node, client[0])})
else:
if not skiplogging:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)})
continue
if opts.get(77, None) == b'iPXE':
if not profile:
profile = get_deployment_profile(node, cfg)
if not profile:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)})
continue
myip = socket.inet_ntoa(myipn)
bootfile = 'http://{0}/confluent-public/os/{1}/boot.ipxe'.format(myip, profile).encode('utf8')
elif disco['arch'] == 'uefi-x64':
bootfile = b'confluent/x86_64/ipxe.efi'
elif disco['arch'] == 'bios-x86':
bootfile = b'confluent/x86_64/ipxe.kkpxe'
elif disco['arch'] == 'uefi-aarch64':
bootfile = b'confluent/aarch64/ipxe.efi'
if len(bootfile) > 127:
log.log(
{'info': 'Boot offer cannot be made to {0} as the '
'profile name "{1}" is {2} characters longer than is supported '
'for this boot method.'.format(
node, profile, len(bootfile) - 127)})
continue
myip = socket.inet_ntoa(myipn)
bootfile = 'http://{0}/confluent-public/os/{1}/boot.ipxe'.format(myip, profile).encode('utf8')
elif disco['arch'] == 'uefi-x64':
bootfile = b'confluent/x86_64/ipxe.efi'
elif disco['arch'] == 'bios-x86':
bootfile = b'confluent/x86_64/ipxe.kkpxe'
elif disco['arch'] == 'uefi-aarch64':
bootfile = b'confluent/aarch64/ipxe.efi'
if len(bootfile) > 127:
log.log(
{'info': 'Boot offer cannot be made to {0} as the '
'profile name "{1}" is {2} characters longer than is supported '
'for this boot method.'.format(
node, profile, len(bootfile) - 127)})
continue
rpv[:240] = rqv[:240].tobytes()
rpv[0:1] = b'\x02'
rpv[108:108 + len(bootfile)] = bootfile
rpv[240:243] = b'\x35\x01\x05'
rpv[243:249] = b'\x36\x04' + myipn
rpv[20:24] = myipn
rpv[249:268] = b'\x61\x11' + opts[97]
rpv[268:280] = b'\x3c\x09PXEClient\xff'
net4011.sendto(rpv[:281], client)
rpv[:240] = rqv[:240].tobytes()
rpv[0:1] = b'\x02'
rpv[108:108 + len(bootfile)] = bootfile
rpv[240:243] = b'\x35\x01\x05'
rpv[243:249] = b'\x36\x04' + myipn
rpv[20:24] = myipn
rpv[249:268] = b'\x61\x11' + opts[97]
rpv[268:280] = b'\x3c\x09PXEClient\xff'
net4011.sendto(rpv[:281], client)
except Exception as e:
tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
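# The raw option bytes written into the reply above follow standard DHCP
# option encoding (code, length, value):
#   \x35\x01\x05           option 53 (message type), length 1, value 5 (ACK)
#   \x36\x04 + myipn       option 54 (server identifier), our IPv4 address
#   \x61\x11 + opts[97]    option 97 (client machine UUID), echoed back
#   \x3c\x09PXEClient\xff  option 60 (vendor class) 'PXEClient', then end mark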
def start_proxydhcp(handler, nodeguess=None):


@@ -0,0 +1,266 @@
# Copyright 2022 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.etree.ElementTree import fromstring as rfromstring
import pyghmi.util.webclient as wc
import confluent.util as util
import confluent.messages as msg
import confluent.exceptions as exc
import eventlet.green.time as time
import eventlet.green.socket as socket
import eventlet.greenpool as greenpool
import eventlet
try:
import Cookie
httplib = eventlet.import_patched('httplib')
except ImportError:
httplib = eventlet.import_patched('http.client')
import http.cookies as Cookie
sensorsbymodel = {
'FS1350': ['alarms', 'dt', 'duty', 'dw', 'mode', 'p3state', 'primflow', 'ps1', 'ps1a', 'ps1b', 'ps2', 'ps3', 'ps4', 'ps5a', 'ps5b', 'ps5c', 'pumpspeed1', 'pumpspeed2', 'pumpspeed3', 'rh', 'sdp', 'secflow', 'setpoint', 't1', 't2', 't2a', 't2b', 't2c', 't3', 't3', 't4', 't5', 'valve', 'valve2'],
'FS600': ['alarms', 'dt', 'duty', 'dw', 'mode', 'p3state', 'pdp', 'primflow', 'ps1', 'ps1a', 'ps1b', 'ps2', 'ps3', 'ps4', 'ps5a', 'ps5b', 'pumpspeed1', 'pumpspeed2', 'rh', 'sdp', 'secflow', 'setpoint', 't1', 't2', 't2a', 't2b', 't2c', 't3', 't3', 't4', 't5', 'valve'],
'RM100': ['alarms', 'dt', 'duty', 'dw', 'mode', 'p3state', 'primflow', 'ps1', 'ps1a', 'ps2', 'ps3', 'pumpspeed1', 'pumpspeed2', 'rh', 'sdp', 'secflow', 'setpoint', 't1', 't2', 't2a', 't2b', 't2c', 't3', 't3', 't4', 't5', 'valve'],
}
_thesensors = {
'RM100': {
't1': ('Primary loop supply temperature', 'degC'),
't2': ('Secondary loop supply temperature', 'degC'),
't4': ('Secondary loop return temperature', 'degC'),
't3': ('Ambient air temperature', 'degC'),
't5': ('Primary loop return temperature', 'degC'),
'rh': ('Relative Humidity', '%'),
'dw': ('Dewpoint', 'degC'),
'pumpspeed1': ('Pump 1 Speed', '%'),
'pumpspeed2': ('Pump 2 Speed', '%'),
'alarms': ('Number of active alarms', ''),
'primflow': ('Input flow rate', 'l/m'),
'secflow': ('Output flow rate', 'l/m'),
'ps1': ('Secondary loop return pressure', 'bar'),
'ps3': ('Secondary loop supply pressure', 'bar'),
},
'FS600': {
't1': ('Primary loop supply temperature', 'degC'),
't2': ('Secondary loop supply temperature', 'degC'),
't4': ('Secondary loop return temperature', 'degC'),
't5': ('Primary loop return temperature', 'degC'),
't3': ('Ambient air temperature', 'degC'),
'rh': ('Relative Humidity', '%'),
'dw': ('Dewpoint', 'degC'),
'pumpspeed1': ('Pump 1 Speed', '%'),
'pumpspeed2': ('Pump 2 Speed', '%'),
'alarms': ('Number of active alarms', ''),
'primflow': ('Input flow rate', 'l/m'),
'secflow': ('Output flow rate', 'l/m'),
'ps1': ('Secondary loop return pressure', 'bar'),
'ps3': ('Primary loop supply pressure', 'bar'),
'ps2': ('Secondary loop supply pressure', 'bar'),
},
'FS1350': {
't1': ('Primary loop supply temperature', 'degC'),
't2': ('Secondary loop supply temperature', 'degC'),
't4': ('Secondary loop return temperature', 'degC'),
't5': ('Primary loop return temperature', 'degC'),
't3': ('Ambient air temperature', 'degC'),
'rh': ('Relative Humidity', '%'),
'dw': ('Dewpoint', 'degC'),
'pumpspeed1': ('Pump 1 Speed', '%'),
'pumpspeed2': ('Pump 2 Speed', '%'),
'pumpspeed3': ('Pump 3 Speed', '%'),
'alarms': ('Number of active alarms', ''),
'primflow': ('Input flow rate', 'l/m'),
'secflow': ('Output flow rate', 'l/m'),
'ps1': ('Secondary loop return pressure', 'bar'),
'ps3': ('Primary loop supply pressure', 'bar'),
'ps2': ('Secondary loop supply pressure', 'bar'),
'ps4': ('Primary loop return pressure', 'bar'),
},
}
def fromstring(inputdata):
if isinstance(inputdata, bytes):
cmpstr = b'!entity'
else:
cmpstr = '!entity'
if cmpstr in inputdata.lower():
raise Exception('!ENTITY not supported in this interface')
# The measures above should filter out the risky facets of xml
# We don't need sophisticated feature support
return rfromstring(inputdata) # nosec
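# Rejecting '!ENTITY' blocks XML external-entity (XXE) and entity-expansion
# payloads before they reach the parser, e.g. an illustrative attack body:
#   <!DOCTYPE x [<!ENTITY e SYSTEM "file:///etc/passwd">]><x>&e;</x>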
def simplify_name(name):
return name.lower().replace(' ', '_').replace('/', '-').replace(
'_-_', '-')
pdupool = greenpool.GreenPool(128)
class WebResponse(httplib.HTTPResponse):
def _check_close(self):
return True
class WebConnection(wc.SecureHTTPConnection):
response_class = WebResponse
def __init__(self, host, secure, verifycallback):
if secure:
port = 443
else:
port = 80
wc.SecureHTTPConnection.__init__(self, host, port, verifycallback=verifycallback)
self.secure = secure
self.cookies = {}
def connect(self):
if self.secure:
return super(WebConnection, self).connect()
addrinfo = socket.getaddrinfo(self.host, self.port)[0]
# work around problems from too large an MTU, a moderately frequent
# occurrence in this space
plainsock = socket.socket(addrinfo[0])
plainsock.settimeout(self.mytimeout)
try:
plainsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG, 1456)
except socket.error:
pass
plainsock.connect(addrinfo[4])
self.sock = plainsock
def getresponse(self):
try:
rsp = super(WebConnection, self).getresponse()
try:
hdrs = [x.split(':', 1) for x in rsp.msg.headers]
except AttributeError:
hdrs = rsp.msg.items()
for hdr in hdrs:
if hdr[0] == 'Set-Cookie':
c = Cookie.BaseCookie(hdr[1])
for k in c:
self.cookies[k] = c[k].value
except httplib.BadStatusLine:
self.broken = True
raise
return rsp
def request(self, method, url, body=None):
headers = {}
if body:
headers['Content-Length'] = len(body)
cookies = []
for cookie in self.cookies:
cookies.append('{0}={1}'.format(cookie, self.cookies[cookie]))
headers['Cookie'] = ';'.join(cookies)
headers['Host'] = 'pdu.cluster.net'
headers['Accept'] = '*/*'
headers['Accept-Language'] = 'en-US,en;q=0.9'
headers['Connection'] = 'close'
headers['Referer'] = 'http://pdu.cluster.net/setting_admin.htm'
return super(WebConnection, self).request(method, url, body, headers)
def grab_response(self, url, body=None, method=None):
if method is None:
method = 'GET' if body is None else 'POST'
if body:
self.request(method, url, body)
else:
self.request(method, url)
rsp = self.getresponse()
body = rsp.read()
return body, rsp.status
class CoolteraClient(object):
def __init__(self, cdu, configmanager):
self.node = cdu
self.configmanager = configmanager
self._wc = None
@property
def wc(self):
if self._wc:
return self._wc
targcfg = self.configmanager.get_node_attributes(self.node,
['hardwaremanagement.manager'],
decrypt=True)
targcfg = targcfg.get(self.node, {})
target = targcfg.get(
'hardwaremanagement.manager', {}).get('value', None)
if not target:
target = self.node
target = target.split('/', 1)[0]
cv = util.TLSCertVerifier(
self.configmanager, self.node,
'pubkeys.tls_hardwaremanager').verify_cert
self._wc = WebConnection(target, secure=True, verifycallback=cv)
return self._wc
def xml2stateinfo(statdata):
statdata = fromstring(statdata)
stateinfo = []
sensornames = sorted([x.tag for x in statdata])
themodel = None
for model in sensorsbymodel:
if sensorsbymodel[model] == sensornames:
themodel = model
break
thesensors = _thesensors[themodel]
#['mode', 't1', 't2a', 't2b', 't2c', 't2', 't5', 't3', 't4', 'dw', 't3', 'rh', 'setpoint', 'secflow', 'primflow', 'ps1', 'ps1a', 'ps1b', 'ps2', 'ps3', 'ps4', 'ps5a', 'ps5b', 'ps5c', 'sdp', 'valve', 'valve2', 'pumpspeed1', 'pumpspeed2', 'pumpspeed3', 'alarms', 'dt', 'p3state', 'duty']
for tagname in thesensors:
label, units = thesensors[tagname]
val = statdata.find(tagname).text.replace(units, '').strip()
stateinfo.append({
'name': label,
'value': val,
'units': units.replace('degC', '°C'),
'type': 'Temperature',
})
return stateinfo
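# Illustrative shape of the status.xml this parses (the tag names are the
# real sensor ids; the root element name and values here are assumptions):
#   <response><t1>17.2 degC</t1><rh>32 %</rh> ... <alarms>0</alarms></response>
# The set of child tags present is what identifies the CDU model above.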
_sensors_by_node = {}
def read_sensors(element, node, configmanager):
category, name = element[-2:]
justnames = False
if len(element) == 3:
# just get names; the sensor set is model-specific, so list the union of
# known sensor labels across the supported models
justnames = True
seen = set()
for model in _thesensors:
for label, _ in _thesensors[model].values():
if label not in seen:
seen.add(label)
yield msg.ChildCollection(simplify_name(label))
return
if category in ('leds', 'fans'):
return
sn = _sensors_by_node.get(node, None)
if not sn or sn[1] < time.time():
cc = CoolteraClient(node, configmanager)
cc.wc.request('GET', '/status.xml')
rsp = cc.wc.getresponse()
statdata = rsp.read()
statinfo = xml2stateinfo(statdata)
_sensors_by_node[node] = (statinfo, time.time() + 1)
sn = _sensors_by_node.get(node, None)
if sn:
yield msg.SensorReadings(sn[0], name=node)
def retrieve(nodes, element, configmanager, inputdata):
if element[0] == 'sensors':
gp = greenpool.GreenPile(pdupool)
for node in nodes:
gp.spawn(read_sensors, element, node, configmanager)
for rsp in gp:
for datum in rsp:
yield datum
return


@@ -17,6 +17,7 @@ import confluent.util as util
import confluent.messages as msg
import confluent.exceptions as exc
import eventlet.green.time as time
import eventlet
import eventlet.greenpool as greenpool
def simplify_name(name):
@@ -226,5 +227,6 @@ def update(nodes, element, configmanager, inputdata):
gc = GeistClient(node, configmanager)
newstate = inputdata.powerstate(node)
gc.set_outlet(element[-1], newstate)
eventlet.sleep(1)
for res in retrieve(nodes, element, configmanager, inputdata):
yield res
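# Presumably the added one-second eventlet.sleep gives the PDU firmware time
# to apply the new outlet state before retrieve() polls the state back.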


@@ -549,7 +549,10 @@ class SuseHandler(OsHandler):
subprocess.check_call(cmd)
subprocess.check_call(['zypper', '-R', self.targpath, 'install'] + self.zyppargs)
os.symlink('/usr/lib/systemd/system/sshd.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/sshd.service'))
args.cmd = ['mkinitrd']
if os.path.exists(os.path.join(self.targpath, 'sbin/mkinitrd')):
args.cmd = ['mkinitrd']
else:
args.cmd = ['dracut', '-f']
run_constrainedx(fancy_chroot, (args, self.targpath))
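# Newer SUSE releases no longer ship the mkinitrd compatibility wrapper, so
# when sbin/mkinitrd is absent in the target the code falls back to invoking
# dracut -f directly to regenerate the initrd.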
@@ -1206,7 +1209,14 @@ def recursecp(source, targ):
def pack_image(args):
outdir = args.profilename
if '/' in outdir:
raise Exception('Full path not supported, supply only the profile name')
raise Exception('Full path not supported, supply only the profile name\n')
if args.baseprofile:
baseprofiledir = args.baseprofile
if '/' not in args.baseprofile:
baseprofiledir = os.path.join('/var/lib/confluent/public/os', args.baseprofile)
if not os.path.exists(baseprofiledir):
sys.stderr.write('Specified base profile "{0}" does not seem to exist\n'.format(baseprofiledir))
sys.exit(1)
privdir = os.path.join('/var/lib/confluent/private/os/', outdir)
outdir = os.path.join('/var/lib/confluent/public/os/', outdir)
if os.path.exists(outdir):