mirror of https://github.com/xcat2/confluent.git synced 2025-08-15 15:50:24 +00:00

Implement Lenovo Thinksystem and discovery support

This commit is contained in:
Jarrod Johnson
2017-06-20 14:56:24 -04:00
parent 6117a90372
commit e0cc67f57a
43 changed files with 1435 additions and 472 deletions

View File

@@ -47,6 +47,7 @@ import optparse
import os
import select
import shlex
import signal
import socket
import sys
import time
@@ -56,7 +57,10 @@ try:
import tty
except ImportError:
pass
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
exitcode = 0
consoleonly = False
consolename = ""
@@ -84,6 +88,32 @@ netserver = None
laststate = {}
def print_help():
print("confetty provides a filesystem like interface to confluent. "
"Navigation is done using the same commands as would be used in a "
"filesystem. Tab completion is supported to aid in navigation,"
"as is up arrow to recall previous commands and control-r to search"
"previous command history, similar to using bash\n\n"
"The supported commands are:\n"
"cd [location] - Set the current command context, similar to a "
"working directory.\n"
"show [resource] - Present the information about the specified "
"resource, or current context if omitted.\n"
"create [resource] attributename=value attributename=value - Create "
"a new instance of a resource.\n"
"remove [resource] - Remove a resource from a list\n"
"set [resource] attributename=value attributename=value - Change "
"the specified attributes value for the given resource name\n"
"unset [resource] attributename - Clear any value for the given "
"attribute names on a resource.\n"
"start [resource] - When used on a text session resource, it "
"enters remote terminal mode. In this mode, use 'ctrl-e, c, ?' for "
"help"
)
#TODO(jjohnson2): lookup context help for 'target' variable, perhaps
#common with the api document
def updatestatus(stateinfo={}):
status = consolename
info = []
@@ -106,7 +136,7 @@ def updatestatus(stateinfo={}):
if 'showtime' in laststate:
showtime = laststate['showtime']
age = time.time() - laststate['showtime']
if age > 86400: # older than one day
if age > 86400: # older than one day
# disambiguate by putting date in and time
info.append(time.strftime('%m-%dT%H:%M', time.localtime(showtime)))
else:
@@ -169,6 +199,7 @@ valid_commands = [
'remove',
'rm',
'delete',
'help',
]
candidates = None
@@ -238,7 +269,7 @@ def parse_command(command):
try:
args = shlex.split(command, posix=True)
except ValueError as ve:
print('Error: ' + ve.message)
print('Error: ' + str(ve))
return []
return args
@@ -306,7 +337,11 @@ def do_command(command, server):
return
argv[0] = argv[0].lower()
if argv[0] == 'exit':
if os.environ['TERM'] not in ('linux',):
sys.stdout.write('\x1b]0;\x07')
sys.exit(0)
elif argv[0] in ('help', '?'):
return print_help()
elif argv[0] == 'cd':
otarget = target
if len(argv) > 1:
@@ -348,6 +383,21 @@ def do_command(command, server):
elif argv[0] in ('cat', 'show', 'ls', 'dir'):
if len(argv) > 1:
targpath = fullpath_target(argv[1])
if argv[0] in ('ls', 'dir'):
if targpath[-1] != '/':
# could still be a directory, fetch the parent..
childname = targpath[targpath.rindex('/') + 1:]
parentpath = targpath[:targpath.rindex('/') + 1]
if parentpath != '/noderange/':
# if it were /noderange/, then it's a directory
# even though parent won't tell us that
for res in session.read(parentpath, server):
try:
if res['item']['href'] == childname:
print(childname)
return
except KeyError:
pass
else:
targpath = target
for res in session.read(targpath):
@@ -418,6 +468,10 @@ def createresource(args):
def makecall(callout, args):
global exitcode
for response in callout(*args):
if 'deleted' in response:
print("Deleted: " + response['deleted'])
if 'created' in response:
print("Created: " + response['created'])
if 'error' in response:
if 'errorcode' in response:
exitcode = response['errorcode']
@@ -526,7 +580,11 @@ def quitconfetty(code=0, fullexit=False, fixterm=True):
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, currfl ^ os.O_NONBLOCK)
if oldtcattr is not None:
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, oldtcattr)
# Request default color scheme, to undo potential weirdness of terminal
sys.stdout.write('\x1b[m')
if fullexit:
if os.environ['TERM'] not in ('linux',):
sys.stdout.write('\x1b]0;\x07')
sys.exit(code)
else:
tlvdata.send(session.connection, {'operation': 'stop',
@@ -651,11 +709,11 @@ def conserver_command(filehandle, localcommand):
else:
print("Unknown power state.]\r")
check_power_state()
#check_power_state()
elif localcommand[0] == '?':
print("help]\r")
print(". disconnect\r")
print(". exit console\r")
print("b break\r")
print("o reopen\r")
print("po power off\r")
@@ -744,6 +802,8 @@ if sys.stdout.isatty():
readline.parse_and_bind("tab: complete")
readline.parse_and_bind("set bell-style none")
dl = readline.get_completer_delims().replace('-', '')
readline.set_completer_delims(dl)
readline.set_completer(completer)
doexit = False
@@ -767,10 +827,11 @@ def check_power_state():
global powerstate, powertime
for rsp in session.read('/nodes/' + consolename + '/power/state'):
if type(rsp) == dict and 'state' in rsp:
powerstate = rsp['state']['value']
newpowerstate = rsp['state']['value']
powertime = time.time()
if powerstate == 'off':
sys.stdout.write("\r\n[powered off]\r\n")
if newpowerstate != powerstate and newpowerstate == 'off':
sys.stdout.write("\x1b[2J\x1b[;H[powered off]\r\n")
powerstate = newpowerstate
elif type(rsp) == dict and '_requestdone' in rsp:
break
elif type(rsp) == dict:
@@ -799,7 +860,12 @@ while inconsole or not doexit:
updatestatus(data)
continue
if data is not None:
sys.stdout.write(data)
try:
sys.stdout.write(data)
except IOError: # Sometimes circumstances are bad
# resort to writing a byte at a time...
for d in data:
sys.stdout.write(d)
now = time.time()
if ('showtime' not in laststate or
(now // 60) != laststate['showtime'] // 60):
@@ -829,13 +895,15 @@ while inconsole or not doexit:
sys.stdout.write("\r\n[remote disconnected]\r\n")
break
else:
myinput = fh.read()
myinput = check_escape_seq(myinput, fh)
if myinput:
tlvdata.send(session.connection, myinput)
if powerstate is None or powertime < time.time() - 60: # Check powerstate every 60 seconds
check_power_state()
try:
myinput = fh.read()
myinput = check_escape_seq(myinput, fh)
if myinput:
tlvdata.send(session.connection, myinput)
except IOError:
pass
#if powerstate is None or powertime < time.time() - 60: # Check powerstate every 60 seconds
# check_power_state()
else:
currcommand = prompt()
try:
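
The new exit paths reset terminal state with raw escape sequences: \x1b[m restores the default colors and \x1b]0;\x07 clears any xterm window title set while attached to a console. A minimal standalone sketch of that cleanup, assuming an xterm-compatible terminal (the TERM check mirrors the code above):

    import os
    import sys

    def reset_terminal():
        # restore default SGR attributes (colors, reverse video, etc.)
        sys.stdout.write('\x1b[m')
        # clear the window title, but only on terminals that honor OSC 0;
        # the linux console does not, hence the TERM check
        if os.environ.get('TERM', '') not in ('linux',):
            sys.stdout.write('\x1b]0;\x07')
        sys.stdout.flush()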

View File

@@ -19,8 +19,14 @@ __author__ = 'alin37'
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -29,13 +35,14 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(
usage='''\n %prog [options] noderange [list of attributes] \
\n %prog [options] noderange attribute1=value1,attribute2=value,...
usage='''\n %prog [-b] noderange [list of attributes] \
\n %prog -c noderange <list of attributes> \
\n %prog noderange attribute1=value1 attribute2=value,...
\n ''')
argparser.add_option('-b', '--blame', action='store_true',
help='Show information about how attributes inherited')
argparser.add_option('-c', '--clear', action='store_true',
help='Clear variables')
help='Clear attributes')
(options, args) = argparser.parse_args()
@@ -46,7 +53,8 @@ try:
noderange = args[0]
nodelist = '/noderange/{0}/nodes/'.format(noderange)
except IndexError:
nodelist = '/nodes/'
argparser.print_help()
sys.exit(1)
session = client.Command()
exitcode = 0
@@ -54,7 +62,7 @@ exitcode = 0
nodetype="noderange"
if len(args) > 1:
if "=" in args[1]:
if "=" in args[1] or options.clear:
exitcode=client.updateattrib(session,args,nodetype, noderange, options)
try:
# setting user output to what the user inputs
@@ -65,6 +73,7 @@ if len(args) > 1:
showtype = 'current'
requestargs=args[2:]
else:
showtype = 'all'
requestargs=args[1:]
except:
pass

View File

@@ -17,8 +17,13 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
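
Each client script now restores the default SIGPIPE disposition, so piping output into head or a pager ends the command quietly instead of dumping a broken-pipe traceback; the AttributeError guard keeps the scripts importable on platforms where signal.SIGPIPE does not exist (e.g. Windows). A standalone sketch of the same pattern:

    import signal
    import sys

    try:
        # die silently when the reader of our output goes away
        # (e.g. `nodelist all | head`)
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:
        # no SIGPIPE on this platform; nothing to restore
        pass

    for i in range(100000):
        sys.stdout.write('line {0}\n'.format(i))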

View File

@@ -18,8 +18,13 @@
from datetime import datetime as dt
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -28,7 +33,7 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(
usage="Usage: %prog [options] noderange (clear)")
usage="Usage: %prog [options] noderange [clear]")
(options, args) = argparser.parse_args()
try:
noderange = args[0]

View File

@@ -17,7 +17,13 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):

View File

@@ -19,8 +19,13 @@ __author__ = 'alin37'
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -57,6 +62,7 @@ exitcode = 0
#Sets attributes
if len(args) > 1:
showtype = 'all'
exitcode=client.updateattrib(session,args,nodetype, nodegroups, options)
try:
# setting user output to what the user inputs

View File

@@ -18,8 +18,13 @@
import codecs
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):

View File

@@ -17,7 +17,12 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))

View File

@@ -17,7 +17,13 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -77,6 +83,8 @@ try:
except IndexError:
argparser.print_help()
sys.exit(1)
if len(args) > 1 and args[1] == 'firm':
os.execlp('nodefirmware', 'nodefirmware', noderange)
try:
session = client.Command()
for res in session.read('/noderange/{0}/inventory/hardware/all/all'.format(
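
With this change, nodeinventory <noderange> firm simply re-executes as nodefirmware, keeping firmware reporting in a single code path. The delegation is an execlp that replaces the current process, assuming nodefirmware is installed alongside on PATH:

    import os

    # replaces the running nodeinventory process; never returns on success
    os.execlp('nodefirmware', 'nodefirmware', 'n1-n4')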

View File

@@ -19,8 +19,13 @@ __author__ = 'jjohnson2,alin37'
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -30,7 +35,8 @@ import confluent.client as client
def main():
argparser = optparse.OptionParser(
usage="Usage: %prog [options] noderange [list of attributes]")
usage="Usage: %prog noderange\n"
" or: %prog [options] noderange <nodeattribute>...")
argparser.add_option('-b', '--blame', action='store_true',
help='Show information about how attributes inherited')
(options, args) = argparser.parse_args()
@@ -59,4 +65,4 @@ def main():
sys.exit(exitcode)
if __name__ == '__main__':
main()
main()

View File

@@ -17,8 +17,13 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -43,6 +48,9 @@ if len(sys.argv) > 2:
elif not sys.argv[2] in ('stat', 'state', 'status'):
setstate = sys.argv[2]
if setstate not in (None, 'on', 'off', 'shutdown', 'boot', 'reset'):
argparser.print_help()
sys.exit(1)
session = client.Command()
exitcode = 0
session.add_precede_key('oldstate')

View File

@@ -15,13 +15,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import optparse
import os
import select
import shlex
import signal
import subprocess
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -30,57 +36,75 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(
usage="Usage: %prog node commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
"'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
argparser.disable_interspersed_args()
(options, args) = argparser.parse_args()
if len(args) < 2:
argparser.print_help()
sys.exit(1)
c = client.Command()
cmdstr = " ".join(args[1:])
def run():
concurrentprocs = 168
# among other things, to stay under FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind to the unaware under memory pressure and such...
argparser = optparse.OptionParser(
usage="Usage: %prog node commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
"'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
argparser.disable_interspersed_args()
(options, args) = argparser.parse_args()
if len(args) < 2:
argparser.print_help()
sys.exit(1)
c = client.Command()
cmdstr = " ".join(args[1:])
nodeforpopen = {}
popens = []
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
ex = exp['databynode']
for node in ex:
cmd = ex[node]['value'].encode('utf-8')
cmdv = shlex.split(cmd)
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popens.append(nopen)
nodeforpopen[nopen] = node
currprocs = 0
all = set([])
pipedesc = {}
pendingexecs = deque()
all = set([])
pipedesc = {}
exitcode = 0
for pop in popens:
node = nodeforpopen[pop]
pipedesc[pop.stdout] = { 'node': node, 'popen': pop, 'type': 'stdout'}
pipedesc[pop.stderr] = {'node': node, 'popen': pop, 'type': 'stderr'}
all.add(pop.stdout)
all.add(pop.stderr)
rdy, _, _ = select.select(all, [], [], 10)
while all and rdy:
for r in rdy:
data = r.readline()
desc = pipedesc[r]
if data:
node = desc['node']
if desc['type'] == 'stdout':
sys.stdout.write('{0}: {1}'.format(node,data))
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
ex = exp['databynode']
for node in ex:
cmd = ex[node]['value'].encode('utf-8')
cmdv = shlex.split(cmd)
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
else:
sys.stderr.write('{0}: {1}'.format(node, data))
else:
pop = desc['popen']
ret = pop.poll()
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
if all:
rdy, _, _ = select.select(all, [], [], 10)
sys.exit(exitcode)
pendingexecs.append((node, cmdv))
exitcode = 0
rdy, _, _ = select.select(all, [], [], 10)
while all:
for r in rdy:
data = r.readline()
desc = pipedesc[r]
if data:
node = desc['node']
if desc['type'] == 'stdout':
sys.stdout.write('{0}: {1}'.format(node,data))
else:
sys.stderr.write('{0}: {1}'.format(node, data))
else:
pop = desc['popen']
ret = pop.poll()
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
if desc['type'] == 'stdout' and pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
if all:
rdy, _, _ = select.select(all, [], [], 10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
all.add(nopen.stdout)
all.add(nopen.stderr)
if __name__ == '__main__':
run()
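
The rewritten noderun caps the number of simultaneously spawned children (168, keeping the pipe file descriptors well under typical FD_SETSIZE limits) and queues the rest, launching a queued command whenever a running child's stdout closes. A condensed, self-contained sketch of that scheduling loop, using plain local commands in place of confluent expression expansion:

    from collections import deque
    import select
    import subprocess
    import sys

    MAXPROCS = 4  # the real script allows 168

    def start(node, cmdv, fds, meta):
        proc = subprocess.Popen(cmdv, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        for stream, kind in ((proc.stdout, 'stdout'), (proc.stderr, 'stderr')):
            meta[stream] = {'node': node, 'popen': proc, 'type': kind}
            fds.add(stream)

    def run_all(commands):
        # commands: iterable of (node, argv) pairs
        pending = deque(commands)
        fds, meta, exitcode = set(), {}, 0
        while pending and len(fds) < MAXPROCS * 2:  # two pipes per child
            node, cmdv = pending.popleft()
            start(node, cmdv, fds, meta)
        while fds:
            rdy, _, _ = select.select(fds, [], [], 10)
            for r in rdy:
                data = r.readline()
                desc = meta[r]
                if data:
                    out = sys.stdout if desc['type'] == 'stdout' else sys.stderr
                    out.write('{0}: {1}'.format(desc['node'],
                                                data.decode('utf-8', 'replace')))
                else:  # this stream is done; the child may have exited
                    ret = desc['popen'].poll()
                    if ret is not None:
                        exitcode |= ret
                    fds.discard(r)
                    if desc['type'] == 'stdout' and pending:
                        node, cmdv = pending.popleft()
                        start(node, cmdv, fds, meta)
        return exitcode

    if __name__ == '__main__':
        cmds = [('n{0}'.format(i), ['echo', 'hello']) for i in range(10)]
        sys.exit(run_all(cmds))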

View File

@@ -19,9 +19,14 @@ import csv
import datetime
import optparse
import os
import signal
import sys
import time
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):

View File

@@ -17,8 +17,13 @@
import optparse
import os
import signal
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):

View File

@@ -15,13 +15,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import optparse
import os
import select
import shlex
import signal
import subprocess
import sys
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
@@ -30,57 +36,75 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(
usage="Usage: %prog node commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
"'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
argparser.disable_interspersed_args()
(options, args) = argparser.parse_args()
if len(args) < 2:
argparser.print_help()
sys.exit(1)
c = client.Command()
cmdstr = " ".join(args[1:])
def run():
concurrentprocs = 168
# among other things, to stay under FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind to the unaware under memory pressure and such...
argparser = optparse.OptionParser(
usage="Usage: %prog node commandexpression",
epilog="Expressions are the same as in attributes, e.g. "
"'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
argparser.disable_interspersed_args()
(options, args) = argparser.parse_args()
if len(args) < 2:
argparser.print_help()
sys.exit(1)
c = client.Command()
cmdstr = " ".join(args[1:])
nodeforpopen = {}
popens = []
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
ex = exp['databynode']
for node in ex:
cmd = ex[node]['value'].encode('utf-8')
cmdv = ['ssh', node] + shlex.split(cmd)
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popens.append(nopen)
nodeforpopen[nopen] = node
currprocs = 0
all = set([])
pipedesc = {}
pendingexecs = deque()
all = set([])
pipedesc = {}
exitcode = 0
for pop in popens:
node = nodeforpopen[pop]
pipedesc[pop.stdout] = { 'node': node, 'popen': pop, 'type': 'stdout'}
pipedesc[pop.stderr] = {'node': node, 'popen': pop, 'type': 'stderr'}
all.add(pop.stdout)
all.add(pop.stderr)
rdy, _, _ = select.select(all, [], [], 10)
while all and rdy:
for r in rdy:
data = r.readline()
desc = pipedesc[r]
if data:
node = desc['node']
if desc['type'] == 'stdout':
sys.stdout.write('{0}: {1}'.format(node,data))
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
ex = exp['databynode']
for node in ex:
cmd = ex[node]['value'].encode('utf-8')
cmdv = ['ssh', node] + shlex.split(cmd)
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
else:
sys.stderr.write('{0}: {1}'.format(node, data))
else:
pop = desc['popen']
ret = pop.poll()
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
if all:
rdy, _, _ = select.select(all, [], [], 10)
sys.exit(exitcode)
pendingexecs.append((node, cmdv))
exitcode = 0
rdy, _, _ = select.select(all, [], [], 10)
while all:
for r in rdy:
data = r.readline()
desc = pipedesc[r]
if data:
node = desc['node']
if desc['type'] == 'stdout':
sys.stdout.write('{0}: {1}'.format(node,data))
else:
sys.stderr.write('{0}: {1}'.format(node, data))
else:
pop = desc['popen']
ret = pop.poll()
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
if desc['type'] == 'stdout' and pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
if all:
rdy, _, _ = select.select(all, [], [], 10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
all.add(nopen.stdout)
all.add(nopen.stderr)
if __name__ == '__main__':
run()
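
nodeshell shares the same bounded scheduling loop; the only difference from noderun is that each expanded expression is wrapped in an ssh invocation targeting the node instead of being run locally. A small sketch of how one per-node argv is assembled, with the expression-expansion result stubbed in (the real code obtains it from c.create('/noderange/.../attributes/expression', ...)):

    import shlex

    def ssh_argv(node, expanded):
        # expanded is the per-node result of confluent expression expansion,
        # e.g. 'ipmitool -H 172.30.3.1 sensor list' for node n1
        return ['ssh', node] + shlex.split(expanded)

    print(ssh_argv('n1', 'ipmitool -H 172.30.3.1 sensor list'))
    # ['ssh', 'n1', 'ipmitool', '-H', '172.30.3.1', 'sensor', 'list']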

View File

@@ -274,7 +274,7 @@ def attrrequested(attr, attrlist, seenattributes):
if candidate == attr:
seenattributes.add(truename)
return True
elif '.' not in candidate and attr.startswith(candidate + '.'):
elif attr.startswith(candidate + '.'):
seenattributes.add(truename)
return True
return False
@@ -309,12 +309,12 @@ def printattributes(session, requestargs, showtype, nodetype, noderange, options
'{2}'.format(node, attr,
currattr['broken'])
elif isinstance(currattr, list) or isinstance(currattr, tuple):
attrout = '{0}: {1}: {2}'.format(node, attr, ', '.join(map(str, currattr)))
attrout = '{0}: {1}: {2}'.format(node, attr, ','.join(map(str, currattr)))
elif isinstance(currattr, dict):
dictout = []
for k, v in currattr.items():
dictout.append("{0}={1}".format(k, v))
attrout = '{0}: {1}: {2}'.format(node, attr, ', '.join(map(str, dictout)))
attrout = '{0}: {1}: {2}'.format(node, attr, ','.join(map(str, dictout)))
else:
print ("CODE ERROR" + repr(attr))
@@ -367,28 +367,17 @@ def printgroupattributes(session, requestargs, showtype, nodetype, noderange, op
attrout = '{0}: {1}: *ERROR* BROKEN EXPRESSION: ' \
'{2}'.format(noderange, attr,
currattr['broken'])
elif 'expression' in currattr:
attrout = '{0}: {1}: (will derive from expression {2})'.format(noderange, attr, currattr['expression'])
elif isinstance(currattr, list) or isinstance(currattr, tuple):
attrout = '{0}: {1}: {2}'.format(noderange, attr, ', '.join(map(str, currattr)))
attrout = '{0}: {1}: {2}'.format(noderange, attr, ','.join(map(str, currattr)))
elif isinstance(currattr, dict):
dictout = []
for k, v in currattr.items():
dictout.append("{0}={1}".format(k, v))
attrout = '{0}: {1}: {2}'.format(noderange, attr, ', '.join(map(str, dictout)))
attrout = '{0}: {1}: {2}'.format(noderange, attr, ','.join(map(str, dictout)))
else:
print ("CODE ERROR" + repr(attr))
if options.blame or 'broken' in currattr:
blamedata = []
if 'inheritedfrom' in currattr:
blamedata.append('inherited from group {0}'.format(
currattr['inheritedfrom']
))
if 'expression' in currattr:
blamedata.append(
'derived from expression "{0}"'.format(
currattr['expression']))
if blamedata:
attrout += ' (' + ', '.join(blamedata) + ')'
print attrout
if not exitcode:
if requestargs:
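
Dropping the "'.' not in candidate" guard means a requested name now matches anything nested beneath it, so asking for net.eth0 also returns net.eth0.switch and net.eth0.switchport. A small sketch of the resulting matching rule on plain dotted attribute names:

    def matches(requested, attrname):
        # exact name, or the requested name as a dotted prefix
        return attrname == requested or attrname.startswith(requested + '.')

    attrs = ['net.eth0.switch', 'net.eth0.switchport', 'net.eth1.switch']
    print([a for a in attrs if matches('net.eth0', a)])
    # ['net.eth0.switch', 'net.eth0.switchport']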

View File

@@ -1,2 +1,4 @@
PATH=/opt/confluent/bin:$PATH
export PATH
MANPATH=/opt/confluent/share/man:$MANPATH
export MANPATH

View File

@@ -1,9 +1,10 @@
confetty(1) --- Interactive confluent client
confetty(8) --- Interactive confluent client
=================================================
## SYNOPSIS
`confetty`
`confetty`
`confetty <confetty command line>`
## DESCRIPTION
@@ -33,5 +34,3 @@ commands.
Start a console session indicated by **ELEMENT** (e.g. /nodes/n1/console/session)
* `rm` **ELEMENT**
Request removal of an element. (e.g. rm events/hardware/log clears log from a node)

View File

@@ -1,75 +1,80 @@
nodeattrib(1) -- List or change confluent nodes attributes
nodeattrib(8) -- List or change confluent nodes attributes
=========================================================
## SYNOPSIS
`nodeattrib` `noderange` [ current | all ]
`nodeattrib` `noderange` [-b] [<nodeattribute>...]
`nodeattrib` `noderange` [<nodeattribute1=value1> <nodeattribute2=value2> ...]
`nodeattrib` `noderange` [-c] [<nodeattribute1> <nodeattribute2=value2> ...]
`nodeattrib [-b] <noderange> [<nodeattribute>...]`
`nodeattrib <noderange> [<nodeattribute1=value1> <nodeattribute2=value2> ...]`
`nodeattrib -c <noderange> <nodeattribute1> <nodeattribute2> ...`
## DESCRIPTION
**nodeattrib** queries the confluent server to get information about nodes. In
**nodeattrib** manages the attributes of confluent nodes. In
the simplest form, it simply takes the given noderange(5) and lists the
matching nodes, one line at a time.
If a list of node attribute names are given, the value of those are also
displayed. If `-b` is specified, it will also display information on
how inherited and expression based attributes are defined. There is more
information on node attributes in nodeattributes(5) man page.
how inherited and expression based attributes are defined. Attributes can be
straightforward values, or an expression as documented in nodeattribexpressions(5).
For a full list of attributes, run `nodeattrib <node> all` against a node.
If `-c` is specified, this will set the nodeattribute to a null value.
This is different from setting the value to an empty string.
Note that `nodeattrib <group>` will likely not provide the expected behavior.
See nodegroupattrib(8) command on how to manage attributes on a group level.
## OPTIONS
* `-b`, `--blame`:
Annotate inherited and expression based attributes to show their base value.
* `-c`, `--clear`:
Clear given nodeattributes since '' is not the same as empty
Clear specified nodeattributes
## EXAMPLES
* Listing matching nodes of a simple noderange:
`# nodeattrib n1-n2`
`n1`: console.method: ipmi
`n1`: hardwaremanagement.manager: 172.30.3.1
`n2`: console.method: ipmi
`n2`: hardwaremanagement.manager: 172.30.3.2
`# nodeattrib n1-n2`
`n1: console.method: ipmi`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n2: console.method: ipmi`
`n2: hardwaremanagement.manager: 172.30.3.2`
* Getting an attribute of nodes matching a noderange:
`# nodeattrib n1,n2 hardwaremanagement.manager`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n2: hardwaremanagement.manager: 172.30.3.2`
`# nodeattrib n1,n2 hardwaremanagement.manager`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n2: hardwaremanagement.manager: 172.30.3.2`
* Getting a group of attributes while determining what group defines them:
`# nodeattrib n1,n2 hardwaremanagement --blame`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n1: hardwaremanagement.method: ipmi (inherited from group everything)`
`n1: hardwaremanagement.switch: r8e1`
`n1: hardwaremanagement.switchport: 14`
`n2: hardwaremanagement.manager: 172.30.3.2`
`n2: hardwaremanagement.method: ipmi (inherited from group everything)`
`n2: hardwaremanagement.switch: r8e1`
`n2: hardwaremanagement.switchport: 2`
`# nodeattrib n1,n2 hardwaremanagement --blame`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n1: hardwaremanagement.method: ipmi (inherited from group everything)`
`n1: hardwaremanagement.switch: r8e1`
`n1: hardwaremanagement.switchport: 14`
`n2: hardwaremanagement.manager: 172.30.3.2`
`n2: hardwaremanagement.method: ipmi (inherited from group everything)`
`n2: hardwaremanagement.switch: r8e1`
`n2: hardwaremanagement.switchport: 2`
* Listing matching nodes of a simple noderange that are set:
`# nodeattrib n1-n2 current`
`n1`: console.method: ipmi
`n1`: hardwaremanagement.manager: 172.30.3.1
`n2`: console.method: ipmi
`n2`: hardwaremanagement.manager: 172.30.3.2
* Listing matching nodes of a simple noderange that are set:
`# nodeattrib n1-n2 current`
`n1: console.method: ipmi`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n2: console.method: ipmi`
`n2: hardwaremanagement.manager: 172.30.3.2`
* Change attribute on nodes of a simple noderange:
`# nodeattrib n1-n2 console.method=serial`
`n1`: console.method: serial
`n1`: hardwaremanagement.manager: 172.30.3.1
`n2`: console.method: serial
`n2`: hardwaremanagement.manager: 172.30.3.2
* Clear attribute on nodes of a simple noderange, if you want to retain the variable set the attribute to "":
`# nodeattrib n1-n2 -c console.method`
`# nodeattrib n1-n2 console.method`
Error: console.logging not a valid attribute
* Change attribute on nodes of a simple noderange:
`# nodeattrib n1-n2 console.method=serial`
`n1: console.method: serial`
`n1: hardwaremanagement.manager: 172.30.3.1`
`n2: console.method: serial`
`n2: hardwaremanagement.manager: 172.30.3.2`
* Clear an attribute on nodes of a simple noderange (to retain the attribute with an empty value, set it to "" instead):
`# nodeattrib n1-n2 -c console.method`
`# nodeattrib n1-n2 console.method`
`n1: console.method: `
`n2: console.method: `
## SEE ALSO
nodegroupattrib(8), nodeattribexpressions(5)

View File

@@ -1,14 +1,14 @@
nodeconsole(1) -- Open a console to a confluent node
nodeconsole(8) -- Open a console to a confluent node
=====================================================
## SYNOPSIS
`nodeconsole` `node`
`nodeconsole <node>`
## DESCRIPTION
**nodeconsole** opens an interactive console session to a given node. This is the
text or serial console of a system. Exiting is done by hitting `Ctrl-e`, then `c`,
then `.`. Note that console output by default is additionally logged to
then `.`. Note that console output by default is additionally logged to
`/var/log/confluent/consoles/`**NODENAME**.
## ESCAPE SEQUENCE COMMANDS
@@ -20,11 +20,29 @@ keystroke will be interpreted as a command. The following commands are availabl
* `.`:
Exit the session and return to the command prompt
* `b`:
[send Break]
Send a break to the remote console when possible (some console plugins may not support this)
* `o`:
[reOpen]
Request confluent to disconnect and reconnect to the console, for example when there is suspicion
that the console has become inoperable but would work if reconnected.
* `po`:
[Power Off]
Power off server immediately, without waiting for OS to shutdown
* `ps`:
[Power Shutdown]
Request OS shut down gracefully, and then power off
* `pb<ent>`:
[Power Boot]
Cause the system to boot immediately, resetting or powering on as appropriate.
Hitting enter is required to execute the reboot, since pb is also the start of the pbs and pbn sequences
* `pbs`:
[Power Boot Setup]
Request immediate boot ultimately landing in interactive firmware setup
* `pbn`:
[Power Boot Network]
Request immediate boot to network
* `?`:
Get a list of supported commands
* `<enter>`:
Abandon entering an escape sequence command
* `<ent>`:
Hit enter to skip entering a command at the escape prompt.

View File

@@ -1,10 +1,10 @@
nodelist(1) -- List confluent nodes and their attributes
nodelist(8) -- List confluent nodes and their attributes
=========================================================
## SYNOPSIS
`nodelist` `noderange`
`nodelist` `noderange` [-b] [<nodeattribute>...]
`nodelist <noderange>`
`nodelist <noderange> [-b] <nodeattribute>...`
## DESCRIPTION
@@ -45,4 +45,3 @@ information on node attributes in nodeattributes(5) man page.
`n2: hardwaremanagement.method: ipmi (inherited from group everything)`
`n2: hardwaremanagement.switch: r8e1`
`n2: hardwaremanagement.switchport: 2`

View File

@@ -1,9 +1,9 @@
nodesensors(1) --- Retrieve telemetry for sensors of confluent nodes
nodesensors(8) --- Retrieve telemetry for sensors of confluent nodes
====================================================================
## SYNOPSIS
`nodesensors` `noderange` [-c] [-i <interval>] [-n <samplecount>] [<sensor name or category>...]
`nodesensors <noderange> [-c] [-i <interval>] [-n <samplecount>] [<sensor name or category>...]`
## DESCRIPTION

View File

@@ -1,7 +1,13 @@
from setuptools import setup
import os
scriptlist = ['bin/{0}'.format(d) for d in os.listdir('bin/')]
data_files = [('/etc/profile.d', ['confluent_env.sh', 'confluent_env.csh'])]
try:
scriptlist = ['bin/{0}'.format(d) for d in os.listdir('bin/')]
data_files.append(('/opt/confluent/share/man/man5', ['man/man5/' + x for x in os.listdir('man/man5')]))
data_files.append(('/opt/confluent/share/man/man8', ['man/man8/' + x for x in os.listdir('man/man8')]))
except OSError:
pass
setup(
name='confluent_client',
@@ -11,5 +17,5 @@ setup(
url='http://xcat.sf.net/',
packages=['confluent'],
scripts=scriptlist,
data_files=[('/etc/profile.d', ['confluent_env.sh','confluent_env.csh'])],
data_files=data_files,
)
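
setup.py now builds the data_files list incrementally and only adds the man pages when the man/ directories exist, so packaging still works from a tree where makeman has not been run. A compact sketch of the same guard:

    import os

    data_files = [('/etc/profile.d', ['confluent_env.sh', 'confluent_env.csh'])]
    try:
        for section in ('man5', 'man8'):
            pages = ['man/{0}/{1}'.format(section, page)
                     for page in os.listdir('man/' + section)]
            data_files.append(('/opt/confluent/share/man/' + section, pages))
    except OSError:
        # man pages not generated yet; package without them
        pass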

View File

@@ -1,5 +1,8 @@
#!/bin/sh
cd `dirname $0`
if [ -x ./makeman ]; then
./makeman
fi
./makesetup
VERSION=`cat VERSION`
PKGNAME=$(basename $(pwd))

View File

@@ -16,50 +16,69 @@
# limitations under the License.
#This defines the attributes of variou classes of things
#This defines the attributes of various classes of things
# 'nic', meant to be a nested structure under node
nic = {
'name': {
'description': 'Name in ip/ifconfig as desired by administrator',
},
'port': {
'description': 'Port that this nic connects to',
},
'switch': {
'description': 'Switch that this nic connects to',
},
'customhardwareaddress': {
'description': 'Mac address to push to nic',
},
'dnssuffix': {
'description': ('String to place after nodename, but before'
'Network.Domain to derive FQDN for this NIC'),
},
'hardwareaddress': {
'description': 'Active mac address on this nic (factory or custom)'
},
'ipaddresses': {
'description': 'Set of IPv4 and IPv6 addresses in CIDR format'
},
'pvid': {
'description': 'PVID of port on switch this nic connects to',
},
'mtu': {
'description': 'Requested MTU to configure on this interface',
},
'vlans': {
'description': 'Tagged VLANs to apply to nic/switch',
},
'dhcpv4enabled': {
'description': ('Whether DHCP should be attempted to acquire IPv4'
'address on this interface'),
},
'dhcpv6enabled': {
'description': ('Whether DHCP should be attempted to acquire IPv6'
'address on this interface'),
},
}
# changing mind on design, flattening to a single attribute, a *touch* less
# flexible at the top end, but much easier on the low end
# now net.<name>.attribute scheme
# similarly, leaning toward comma delimited ip addresses, since 99.99% of the
# time each nic will have one ip address
# vlan specification will need to be thought about a tad, each ip could be on
# a distinct vlan, but could have a vlan without an ip for sake of putting
# to a bridge. Current thought is
# vlans attribute would be comma delimited referring to the same index
# as addresses, with either 'native' or a number for vlan id
# the 'joinbridge' attribute would have some syntax like @<vlanid> to indicate
# joining only a vlan of the nic to the bridge
# 'joinbond' attribute would not support vlans.
#nic = {
# 'name': {
# 'description': 'Name in ip/ifconfig as desired by administrator',
# },
# 'biosdevname': {
# 'description': '"biosdevname" scheme to identify the adapter. If not'
# 'mac address match is preferred, then biosdevname, then'
# 'name.',
# },
# 'port': {
# 'description': 'Port that this nic connects to',
# },
# 'switch': {
# 'description': 'Switch that this nic connects to',
# },
# 'customhardwareaddress': {
# 'description': 'Mac address to push to nic',
# },
# 'dnssuffix': {
# 'description': ('String to place after nodename, but before'
# 'Network.Domain to derive FQDN for this NIC'),
# },
# 'hardwareaddress': {
# 'description': 'Active mac address on this nic (factory or custom)'
# },
# 'ipaddresses': {
# 'description': 'Set of IPv4 and IPv6 addresses in CIDR format'
# },
# 'pvid': {
# 'description': 'PVID of port on switch this nic connects to',
# },
# 'mtu': {
# 'description': 'Requested MTU to configure on this interface',
# },
# 'vlans': {
# 'description': 'Tagged VLANs to apply to nic/switch',
# },
# 'dhcpv4enabled': {
# 'description': ('Whether DHCP should be attempted to acquire IPv4'
# 'address on this interface'),
# },
# 'dhcpv6enabled': {
# 'description': ('Whether DHCP should be attempted to acquire IPv6'
# 'address on this interface'),
# },
#}
user = {
'password': {
@@ -71,7 +90,6 @@ user = {
node = {
'groups': {
'type': list,
'default': 'all',
'description': ('List of static groups for which this node is '
'considered a member'),
},
@@ -81,6 +99,72 @@ node = {
#'id': {
# 'description': ('Numeric identifier for node')
#},
# autonode is the feature of generating nodes based on connectivity to
# current node. In recursive autonode, for now we just allow endpoint to
# either be a server directly *or* a server enclosure. This precludes
# for the moment a concept of nested arbitrarily deep, but for now do this.
# hypothetically, one could imagine supporting an array and 'popping'
# names until reaching end. Not worth implementing at this point. If
# a traditional switch is added, it needs some care and feeding anyway.
# If a more exciting scheme presents itself, well we won't have to
# own discovering switches anyway.
# 'autonode.servername': {
# 'description': ('Template for creating nodenames for automatic '
# 'creation of nodes detected as children of '
# 'this node. For example, a node in a server '
# 'enclosure bay or a server connected to a switch or '
# 'an enclosure manager connected to a switch. Certain '
# 'special template parameters are available and can '
# 'be used alongside usual config template directives. '
# '"discovered.nodenumber" will be replaced with the '
# 'bay or port number where the child node is connected.'
# ),
# },
# 'autonode.servergroups': {
# 'type': list,
# 'description': ('A list of groups to which discovered nodes will '
# 'belong to. As in autonode.servername, "discovered." '
# 'variable names will be substituted in special context')
# },
# 'autonode.enclosurename': {
# 'description': ('Template for creating nodenames when the discovered '
# 'node is an enclosure that will in turn generate nodes.'
# )
# },
# 'autonode.enclosuregroups': {
# 'type': list,
# 'description': ('A list of groups to which a discovered node will be'
# 'placed, presuming that node is an enclosure.')
# },
#For now, we consider this eventuality if needed. For now emphasize paradigm
# of group membership and see how far that goes.
# 'autonode.copyattribs': {
# 'type': list,
# 'description': ('A list of attributes to copy from the node generator '
# 'to the generated node. Expressions will be copied '
# 'over without evaluation, so will be evaluated '
# 'in the context of the generated node, rather than the'
# 'parent node. By default, an enclosure will copy over'
# 'autonode.servername, so that would not need to be '
# 'copied ')
# },
'discovery.policy': {
'description': 'Policy to use for auto-configuration of discovered '
'and identified nodes. Valid values are "manual", '
'"permissive", or "open". "manual" means nodes are '
'detected, but not autoconfigured until a user '
'approves. "permissive" indicates to allow discovery, '
'so long as the node has no existing public key. '
'"open" allows discovery even if a known public key '
'is already stored',
},
'info.note': {
'description': 'A field used for administrators to make arbitrary '
'notations about nodes. This is meant entirely for '
'human use and not programmatic use, so it can be '
'freeform text data without concern for issues in how '
'the server will process it.',
},
'location.room': {
'description': 'Room description for the node',
},
@@ -195,17 +279,6 @@ node = {
'description': 'The method used to perform operations such as power '
'control, get sensor data, get inventory, and so on. '
},
'hardwaremanagement.switch': {
'description': 'The switch to which the hardware manager is connected.'
' Only relevant if using switch based discovery via the'
' hardware manager (Lenovo IMMs and CMMs). Not '
'applicable to Lenovo Flex nodes.'
},
'hardwaremanagement.switchport': {
'description': 'The port of the switch that the hardware manager is '
'connected. See documentation of '
'hardwaremanagement.switch for more detail.'
},
'enclosure.manager': {
'description': "The management device for this node's chassis",
# 'appliesto': ['system'],
@@ -223,9 +296,32 @@ node = {
# 'id.serial': {
# 'description': 'The manufacturer serial number of node',
# },
# 'id.uuid': {
# 'description': 'The UUID of the node as presented in DMI',
# },
'id.uuid': {
'description': 'The UUID of the node as presented in DMI.',
},
'net.ipv4_gateway': {
'description': 'The IPv4 gateway to use if applicable. As is the '
'case for other net attributes, net.eth0.ipv4_gateway '
'and similar is accepted.'
},
# 'net.pxe': { 'description': 'Whether pxe will be used on this interface'
# TODO(jjohnson2): Above being 'true' will control whether mac addresses
# are stored in this nics attribute on pxe-client discovery, since
# pxe discovery is ambiguous for BMC and system on same subnet,
# or even both on the same port and same subnet
'net.switch': {
'description': 'An ethernet switch the node is connected to. Note '
'that net.* attributes may be indexed by interface. '
'For example instead of using net.switch, it is '
'possible to use net.eth0.switch and net.eth1.switch '
'or net.0.switch and net.1.switch to define multiple '
'sets of net connectivity associated with each other.'
},
'net.switchport': {
'description': 'The port on the switch that corresponds to this node. '
'See information on net.switch for more on the '
'flexibility of net.* attributes.'
},
# 'id.modelnumber': {
# 'description': 'The manufacturer dictated model number for the node',
# },
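
With the per-nic dictionary shelved, network settings collapse into flat net.<attribute> names that may optionally be indexed by interface, so a single node can carry several switch/port pairs alongside the new discovery and identity attributes. A hedged example of what such an attribute map might look like when handed to nodeattrib or the configuration API (the names follow the schema above; the values are invented):

    # hypothetical attribute set for one node using the flattened net.* scheme
    example_node_attributes = {
        'discovery.policy': 'permissive',   # discover unless a pubkey is already stored
        'id.uuid': '6f3a9e0c-1111-2222-3333-444455556666',
        'net.switch': 'switch1',            # unindexed form
        'net.switchport': '14',
        'net.eth1.switch': 'switch2',       # indexed by interface name...
        'net.eth1.switchport': '3',
        'net.1.ipv4_gateway': '172.30.0.1', # ...or by a bare index
    }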

View File

@@ -72,6 +72,8 @@ import confluent.exceptions as exc
import copy
import cPickle
import errno
import eventlet
import fnmatch
import json
import operator
import os
@@ -151,6 +153,29 @@ def _format_key(key, password=None):
return {"unencryptedvalue": key}
def _do_notifier(cfg, watcher, callback):
try:
callback(nodeattribs=watcher['nodeattrs'], configmanager=cfg)
except Exception:
logException()
def logException():
global tracelog
if tracelog is None:
tracelog = confluent.log.Logger('trace')
tracelog.log(traceback.format_exc(),
ltype=confluent.log.DataTypes.event,
event=confluent.log.Events.stacktrace)
def _do_add_watcher(watcher, added, configmanager):
try:
watcher(added=added, deleting=[], configmanager=configmanager)
except Exception:
logException()
def init_masterkey(password=None):
global _masterkey
global _masterintegritykey
@@ -198,6 +223,26 @@ def decrypt_value(cryptvalue,
return value[0:-padsize]
def attribute_is_invalid(attrname, attrval):
if attrname.startswith('custom.'):
# No type checking or name checking is provided for custom,
# it's not possible
return False
if attrname.startswith('net.'):
# For net.* attributes, split on the dots and put back together
# longer term we might want a generic approach, but
# right now it's just net. attributes
netattrparts = attrname.split('.')
attrname = netattrparts[0] + '.' + netattrparts[-1]
if attrname not in allattributes.node:
# Otherwise, it must be in the allattributes key list
return True
if 'type' in allattributes.node[attrname]:
if not isinstance(attrval, allattributes.node[attrname]['type']):
# provide type checking for attributes with a specific type
return True
return False
def crypt_value(value,
key=None,
integritykey=None):
@@ -372,8 +417,8 @@ class _ExpressionFormat(string.Formatter):
if optype not in self._supported_ops:
raise Exception("Unsupported operation")
op = self._supported_ops[optype]
return op(self._handle_ast_node(node.left),
self._handle_ast_node(node.right))
return op(int(self._handle_ast_node(node.left)),
int(self._handle_ast_node(node.right)))
def _decode_attribute(attribute, nodeobj, formatter=None, decrypt=False):
@@ -551,7 +596,9 @@ class ConfigManager(object):
def watch_attributes(self, nodes, attributes, callback):
"""
Watch a list of attributes for changes on a list of nodes
Watch a list of attributes for changes on a list of nodes. The
attributes may be literal, or a filename style wildcard like
'net*.switch'
:param nodes: An iterable of node names to be watching
:param attributes: An iterable of attribute names to be notified about
@@ -579,6 +626,10 @@ class ConfigManager(object):
}
else:
attribwatchers[node][attribute][notifierid] = callback
if '*' in attribute:
currglobs = attribwatchers[node].get('_attrglobs', set([]))
currglobs.add(attribute)
attribwatchers[node]['_attrglobs'] = currglobs
return notifierid
def watch_nodecollection(self, callback):
@@ -786,9 +837,11 @@ class ConfigManager(object):
if decrypt is None:
decrypt = self.decrypt
retdict = {}
relattribs = attributes
if isinstance(nodelist, str) or isinstance(nodelist, unicode):
nodelist = [nodelist]
if isinstance(attributes, str) or isinstance(attributes, unicode):
attributes = [attributes]
relattribs = attributes
for node in nodelist:
if node not in self._cfgstore['nodes']:
continue
@@ -800,6 +853,10 @@ class ConfigManager(object):
if attribute.startswith('_'):
# skip private things
continue
if '*' in attribute:
for attr in fnmatch.filter(list(cfgnodeobj), attribute):
nodeobj[attr] = _decode_attribute(attr, cfgnodeobj,
decrypt=decrypt)
if attribute not in cfgnodeobj:
continue
# since the formatter is not passed in, the calculator is
@@ -916,11 +973,8 @@ class ConfigManager(object):
raise ValueError("{0} group does not exist".format(group))
for attr in attribmap[group].iterkeys():
if (attr not in ('nodes', 'noderange') and
(attr not in allattributes.node or
('type' in allattributes.node[attr] and
not isinstance(attribmap[group][attr],
allattributes.node[attr]['type'])))):
raise ValueError("nodes attribute is invalid")
attribute_is_invalid(attr, attribmap[group][attr])):
raise ValueError("{0} attribute is invalid".format(attr))
if attr == 'nodes':
if not isinstance(attribmap[group][attr], list):
if type(attribmap[group][attr]) is unicode or type(attribmap[group][attr]) is str:
@@ -1019,7 +1073,7 @@ class ConfigManager(object):
return
notifdata = {}
attribwatchers = self._attribwatchers[self.tenant]
for node in nodeattrs.iterkeys():
for node in nodeattrs:
if node not in attribwatchers:
continue
attribwatcher = attribwatchers[node]
@@ -1032,10 +1086,21 @@ class ConfigManager(object):
# to deletion, to make all watchers aware of the removed
# node and take appropriate action
checkattrs = attribwatcher
globattrs = {}
for attrglob in attribwatcher.get('_attrglobs', []):
for matched in fnmatch.filter(list(checkattrs), attrglob):
globattrs[matched] = attrglob
for attrname in checkattrs:
if attrname not in attribwatcher:
if attrname == '_attrglobs':
continue
for notifierid in attribwatcher[attrname].iterkeys():
watchkey = attrname
# the attrib watcher could still have a glob
if attrname not in attribwatcher:
if attrname in globattrs:
watchkey = globattrs[attrname]
else:
continue
for notifierid in attribwatcher[watchkey]:
if notifierid in notifdata:
if node in notifdata[notifierid]['nodeattrs']:
notifdata[notifierid]['nodeattrs'][node].append(
@@ -1046,18 +1111,12 @@ class ConfigManager(object):
else:
notifdata[notifierid] = {
'nodeattrs': {node: [attrname]},
'callback': attribwatcher[attrname][notifierid]
'callback': attribwatcher[watchkey][notifierid]
}
for watcher in notifdata.itervalues():
callback = watcher['callback']
try:
callback(nodeattribs=watcher['nodeattrs'], configmanager=self)
except Exception:
global tracelog
if tracelog is None:
tracelog = confluent.log.Logger('trace')
tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
eventlet.spawn_n(_do_notifier, self, watcher, callback)
def del_nodes(self, nodes):
if self.tenant in self._nodecollwatchers:
@@ -1154,11 +1213,7 @@ class ConfigManager(object):
if ('everything' in self._cfgstore['nodegroups'] and
'everything' not in attribmap[node]['groups']):
attribmap[node]['groups'].append('everything')
elif (attrname not in allattributes.node or
('type' in allattributes.node[attrname] and
not isinstance(
attrval,
allattributes.node[attrname]['type']))):
elif attribute_is_invalid(attrname, attrval):
errstr = "{0} attribute on node {1} is invalid".format(
attrname, node)
raise ValueError(errstr)
@@ -1206,7 +1261,7 @@ class ConfigManager(object):
if self.tenant in self._nodecollwatchers:
nodecollwatchers = self._nodecollwatchers[self.tenant]
for watcher in nodecollwatchers.itervalues():
watcher(added=newnodes, deleting=[], configmanager=self)
eventlet.spawn_n(_do_add_watcher, watcher, newnodes, self)
self._bg_sync_to_file()
#TODO: wait for synchronization to succeed/fail??)
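
Both the attribute read path and the change watchers now accept filename-style wildcards, expanded with fnmatch against the attribute names actually stored for a node (the '_attrglobs' bookkeeping above). A small sketch of that expansion, independent of the ConfigManager plumbing:

    import fnmatch

    stored = {
        'net.switch': 'switch1',
        'net.eth1.switch': 'switch2',
        'net.eth1.switchport': '3',
        'console.method': 'ipmi',
    }

    pattern = 'net*.switch'
    print(sorted(fnmatch.filter(list(stored), pattern)))
    # ['net.eth1.switch', 'net.switch']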

View File

@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +21,7 @@
# we track nodes that are actively being logged, watched, or have attached
# there should be no more than one handler per node
import codecs
import collections
import confluent.config.configmanager as configmodule
import confluent.exceptions as exc
@@ -29,6 +31,7 @@ import confluent.core as plugin
import confluent.util as util
import eventlet
import eventlet.event
import pyte
import random
import time
import traceback
@@ -37,6 +40,100 @@ _handled_consoles = {}
_tracelog = None
try:
range = xrange
except NameError:
pass
pytecolors2ansi = {
'black': 0,
'red': 1,
'green': 2,
'brown': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
'default': 9,
}
# might be able to use IBMPC map from pyte charsets,
# in that case, would have to mask out certain things (like ESC)
# in the same way that Screen's draw method would do
# for now at least get some of the arrows in there (note ESC is one
# of those arrows... so skip it...
ansichars = dict(zip((0x18, 0x19), u'\u2191\u2193'))
def _utf8_normalize(data, shiftin, decoder):
# first we give the stateful decoder a crack at the byte stream,
# we may come up empty in the event of a partial multibyte
try:
data = decoder.decode(data)
except UnicodeDecodeError:
# first order of business is to reset the state of
# the decoder to a clean one, so we can switch back to utf-8
# when things change, for example going from an F1 setup menu stuck
# in the old days to a modern platform using utf-8
decoder.setstate(codecs.getincrementaldecoder('utf-8')().getstate())
# Ok, so we have something that is not valid UTF-8,
# our next stop is to try CP437. We don't try incremental
# decode, since cp437 is single byte
# replace is silly here, since there is no such thing as invalid cp437,
# but just in case
data = data.decode('cp437', 'replace')
# Finally, the low part of ascii is valid utf-8, but we are going to be
# more interested in the cp437 versions (since this is console *output*
# not input
if shiftin is None:
data = data.translate(ansichars)
return data.encode('utf-8')
def pytechars2line(chars, maxlen=None):
line = '\x1b[m' # start at default params
lb = False # last bold
li = False # last italic
lu = False # last underline
ls = False # last strikethrough
lr = False # last reverse
lfg = 'default' # last fg color
lbg = 'default' # last bg color
hasdata = False
len = 1
for charidx in range(maxlen):
char = chars[charidx]
csi = []
if char.fg != lfg:
csi.append(30 + pytecolors2ansi[char.fg])
lfg = char.fg
if char.bg != lbg:
csi.append(40 + pytecolors2ansi[char.bg])
lbg = char.bg
if char.bold != lb:
lb = char.bold
csi.append(1 if lb else 22)
if char.italics != li:
li = char.italics
csi.append(3 if li else 23)
if char.underscore != lu:
lu = char.underscore
csi.append(4 if lu else 24)
if char.strikethrough != ls:
ls = char.strikethrough
csi.append(9 if ls else 29)
if char.reverse != lr:
lr = char.reverse
csi.append(7 if lr else 27)
if csi:
line += b'\x1b[' + b';'.join(['{0}'.format(x) for x in csi]) + b'm'
if not hasdata and char.data.encode('utf-8').rstrip():
hasdata = True
line += char.data.encode('utf-8')
if maxlen and len >= maxlen:
break
len += 1
return line, hasdata
class ConsoleHandler(object):
_plugin_path = '/nodes/{0}/_console/session'
@@ -44,6 +141,7 @@ class ConsoleHandler(object):
_genwatchattribs = frozenset(('console.method', 'console.logging'))
def __init__(self, node, configmanager):
self.clearpending = False
self._dologging = True
self._isondemand = False
self.error = None
@@ -51,14 +149,15 @@ class ConsoleHandler(object):
self.node = node
self.connectstate = 'unconnected'
self._isalive = True
self.buffer = bytearray()
self.buffer = pyte.Screen(100, 31)
self.termstream = pyte.ByteStream()
self.termstream.attach(self.buffer)
self.livesessions = set([])
self.utf8decoder = codecs.getincrementaldecoder('utf-8')()
if self._logtobuffer:
self.logger = log.Logger(node, console=True,
tenant=configmanager.tenant)
(text, termstate, timestamp) = self.logger.read_recent_text(8192)
else:
(text, termstate, timestamp) = ('', 0, False)
(text, termstate, timestamp) = (b'', 0, False)
# when reading from log file, we will use wall clock
# it should usually match walltime.
self.lasttime = 0
@@ -70,7 +169,7 @@ class ConsoleHandler(object):
# wall clock has gone backwards, use current time as best
# guess
self.lasttime = util.monotonic_time()
self.buffer += text
self.clearbuffer()
self.appmodedetected = False
self.shiftin = None
self.reconnect = None
@@ -91,6 +190,16 @@ class ConsoleHandler(object):
self.connectstate = 'connecting'
eventlet.spawn(self._connect)
def feedbuffer(self, data):
try:
self.termstream.feed(data)
except StopIteration: # corrupt parser state, start over
self.termstream = pyte.ByteStream()
self.termstream.attach(self.buffer)
except Exception:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
def check_isondemand(self):
self._dologging = True
attrvalue = self.cfgmgr.get_node_attributes(
@@ -157,10 +266,18 @@ class ConsoleHandler(object):
else:
self._console.ping()
def clearbuffer(self):
self.feedbuffer(
'\x1bc[no replay buffer due to console.logging attribute set to '
'none or interactive,\r\nconnection loss, or service restart]')
self.clearpending = True
def _disconnect(self):
if self.connectionthread:
self.connectionthread.kill()
self.connectionthread = None
# clear the terminal buffer when disconnected
self.clearbuffer()
if self._console:
self.log(
logdata='console disconnected', ltype=log.DataTypes.event,
@@ -200,6 +317,7 @@ class ConsoleHandler(object):
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
if not isinstance(self._console, conapi.Console):
self.clearbuffer()
self.connectstate = 'unconnected'
self.error = 'misconfigured'
self._send_rcpts({'connectstate': self.connectstate,
@@ -219,6 +337,7 @@ class ConsoleHandler(object):
try:
self._console.connect(self.get_console_output)
except exc.TargetEndpointBadCredentials:
self.clearbuffer()
self.error = 'badcredentials'
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate,
@@ -228,6 +347,7 @@ class ConsoleHandler(object):
self.reconnect = eventlet.spawn_after(retrytime, self._connect)
return
except exc.TargetEndpointUnreachable:
self.clearbuffer()
self.error = 'unreachable'
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate,
@@ -237,6 +357,7 @@ class ConsoleHandler(object):
self.reconnect = eventlet.spawn_after(retrytime, self._connect)
return
except Exception:
self.clearbuffer()
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
self.error = 'unknown'
@@ -257,6 +378,7 @@ class ConsoleHandler(object):
self._send_rcpts({'connectstate': self.connectstate})
def _got_disconnected(self):
self.clearbuffer()
if self.connectstate != 'unconnected':
self.connectstate = 'unconnected'
self.log(
@@ -278,12 +400,6 @@ class ConsoleHandler(object):
self.connectionthread.kill()
self.connectionthread = None
def flushbuffer(self):
# Logging is handled in a different stream
# this buffer is now just for having screen redraw on
# connect
self.buffer = bytearray(self.buffer[-8192:])
def get_console_output(self, data):
# Spawn as a greenthread, return control as soon as possible
# to the console object
@@ -354,19 +470,18 @@ class ConsoleHandler(object):
eventdata |= 2
self.log(data, eventdata=eventdata)
self.lasttime = util.monotonic_time()
if isinstance(data, bytearray) or isinstance(data, bytes):
self.buffer += data
else:
self.buffer += data.encode('utf-8')
self.feedbuffer(data)
# TODO: analyze buffer for registered events, examples:
# panics
# certificate signing request
if len(self.buffer) > 16384:
self.flushbuffer()
self._send_rcpts(data)
if self.clearpending:
self.clearpending = False
self.feedbuffer(b'\x1bc')
self._send_rcpts(b'\x1bc')
self._send_rcpts(_utf8_normalize(data, self.shiftin, self.utf8decoder))
def _send_rcpts(self, data):
for rcpt in self.livesessions:
for rcpt in list(self.livesessions):
try:
rcpt.data_handler(data)
except: # No matter the reason, advance to next recipient
@@ -385,7 +500,26 @@ class ConsoleHandler(object):
'connectstate': self.connectstate,
'clientcount': len(self.livesessions),
}
retdata = ''
retdata = b'\x1b[H\x1b[J' # clear screen
pendingbl = b'' # pending blank lines
maxlen = 0
for line in self.buffer.display:
line = line.rstrip()
if len(line) > maxlen:
maxlen = len(line)
for line in range(self.buffer.lines):
nline, notblank = pytechars2line(self.buffer.buffer[line], maxlen)
if notblank:
if pendingbl:
retdata += pendingbl
pendingbl = b''
retdata += nline + '\r\n'
else:
pendingbl += nline + '\r\n'
if len(retdata) > 6:
retdata = retdata[:-2] # remove the last \r\n
retdata += b'\x1b[{0};{1}H'.format(self.buffer.cursor.y + 1,
self.buffer.cursor.x + 1)
if self.shiftin is not None: # detected that terminal requested a
# shiftin character set, relay that to the terminal that connected
retdata += '\x1b)' + self.shiftin
@@ -393,27 +527,16 @@ class ConsoleHandler(object):
retdata += '\x1b[?1h'
else:
retdata += '\x1b[?1l'
# an alternative would be to emulate a VT100 to know what the
# whole screen would look like
# this is one scheme to clear screen, move cursor then clear
bufidx = self.buffer.rfind('\x1b[H\x1b[J')
if bufidx >= 0:
return retdata + str(self.buffer[bufidx:]), connstate
# another scheme is the 2J scheme
bufidx = self.buffer.rfind('\x1b[2J')
if bufidx >= 0:
# there was some sort of clear screen event
# somewhere in the buffer, replay from that point
# in hopes that it reproduces the screen
return retdata + str(self.buffer[bufidx:]), connstate
else:
# we have no indication of last erase, play back last kibibyte
# to give some sense of context anyway
return retdata + str(self.buffer[-1024:]), connstate
return retdata, connstate
def write(self, data):
if self.connectstate == 'connected':
self._console.write(data)
try:
self._console.write(data)
except Exception:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
self._got_disconnected()
def disconnect_node(node, configmanager):
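The screen replay above is built from a pyte terminal emulation (note the python-pyte requirement added to the spec file later in this commit) rather than from a raw byte history. A minimal standalone sketch of that idea, with illustrative sizes and input only, not the actual ConsoleHandler code:

import pyte

# emulate a small terminal and feed it console output as it arrives
screen = pyte.Screen(80, 24)
stream = pyte.Stream(screen)
stream.feed(u'\x1b[2Jlogin: admin\r\nPassword: ')

# on client attach: clear the client's terminal, redraw the emulated screen,
# then park the cursor where the emulation says it belongs
replay = u'\x1b[H\x1b[J'
for line in screen.display:
    replay += line.rstrip() + u'\r\n'
replay += u'\x1b[{0};{1}H'.format(screen.cursor.y + 1, screen.cursor.x + 1)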

View File

@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
# Copyright 2015-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,11 +33,14 @@
# functions. Console is special and just gets passed through
# see API.txt
import confluent
import confluent.alerts as alerts
import confluent.config.attributes as attrscheme
import confluent.discovery.core as disco
import confluent.interface.console as console
import confluent.exceptions as exc
import confluent.messages as msg
import confluent.networking.macmap as macmap
import confluent.noderange as noderange
try:
import confluent.shellmodule as shellmodule
@@ -100,7 +103,8 @@ def load_plugins():
sys.path.pop(1)
rootcollections = ['noderange/', 'nodes/', 'nodegroups/', 'users/', 'events/']
rootcollections = ['discovery/', 'events/', 'networking/',
'noderange/', 'nodes/', 'nodegroups/', 'users/', 'version']
class PluginRoute(object):
@@ -344,11 +348,14 @@ def delete_nodegroup_collection(collectionpath, configmanager):
raise Exception("Not implemented")
def delete_node_collection(collectionpath, configmanager):
def delete_node_collection(collectionpath, configmanager, isnoderange):
if len(collectionpath) == 2: # just node
node = collectionpath[-1]
configmanager.del_nodes([node])
yield msg.DeletedResource(node)
nodes = [collectionpath[-1]]
if isnoderange:
nodes = noderange.NodeRange(nodes[0], configmanager).nodes
configmanager.del_nodes(nodes)
for node in nodes:
yield msg.DeletedResource(node)
else:
raise Exception("Not implemented")
@@ -392,6 +399,7 @@ def create_group(inputdata, configmanager):
configmanager.add_group_attributes(attribmap)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
yield msg.CreatedResource(groupname)
def create_node(inputdata, configmanager):
@@ -405,6 +413,25 @@ def create_node(inputdata, configmanager):
configmanager.add_node_attributes(attribmap)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
yield msg.CreatedResource(nodename)
def create_noderange(inputdata, configmanager):
try:
noder = inputdata['name']
del inputdata['name']
attribmap = {}
for node in noderange.NodeRange(noder).nodes:
attribmap[node] = inputdata
except KeyError:
raise exc.InvalidArgumentException('name not specified')
try:
configmanager.add_node_attributes(attribmap)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
for node in attribmap:
yield msg.CreatedResource(node)
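For reference, a hypothetical request body that the new create_noderange path would service (the attribute name is only an example):

# POST to /noderange/ with something along these lines:
inputdata = {
    'name': 'n[1:4]',                     # expands to n1 through n4
    'hardwaremanagement.method': 'ipmi',  # applied to every node in the range
}
# yields one msg.CreatedResource per expanded node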
def enumerate_collections(collections):
@@ -419,7 +446,7 @@ def handle_nodegroup_request(configmanager, inputdata,
if len(pathcomponents) < 2:
if operation == "create":
inputdata = msg.InputAttributes(pathcomponents, inputdata)
create_group(inputdata.attribs, configmanager)
return create_group(inputdata.attribs, configmanager)
allgroups = list(configmanager.get_groups())
try:
allgroups.sort(key=noderange.humanify_nodename)
@@ -458,6 +485,16 @@ def handle_nodegroup_request(configmanager, inputdata,
raise Exception("unknown case encountered")
class BadPlugin(object):
def __init__(self, node, plugin):
self.node = node
self.plugin = plugin
def error(self, *args, **kwargs):
yield msg.ConfluentNodeError(
self.node, self.plugin + ' is not a supported plugin')
def handle_node_request(configmanager, inputdata, operation,
pathcomponents, autostrip=True):
iscollection = False
@@ -489,11 +526,14 @@ def handle_node_request(configmanager, inputdata, operation,
# this is enumerating a list of nodes or just empty noderange
if isnoderange and operation == "retrieve":
return iterate_collections([])
elif isnoderange and operation == "create":
inputdata = msg.InputAttributes(pathcomponents, inputdata)
return create_noderange(inputdata.attribs, configmanager)
elif isnoderange or operation == "delete":
raise exc.InvalidArgumentException()
if operation == "create":
inputdata = msg.InputAttributes(pathcomponents, inputdata)
create_node(inputdata.attribs, configmanager)
return create_node(inputdata.attribs, configmanager)
allnodes = list(configmanager.list_nodes())
try:
allnodes.sort(key=noderange.humanify_nodename)
@@ -524,7 +564,8 @@ def handle_node_request(configmanager, inputdata, operation,
raise exc.InvalidArgumentException('Custom interface required for resource')
if iscollection:
if operation == "delete":
return delete_node_collection(pathcomponents, configmanager)
return delete_node_collection(pathcomponents, configmanager,
isnoderange)
elif operation == "retrieve":
return enumerate_node_collection(pathcomponents, configmanager)
else:
@@ -561,7 +602,11 @@ def handle_node_request(configmanager, inputdata, operation,
if attrname in nodeattr[node]:
plugpath = nodeattr[node][attrname]['value']
if plugpath is not None:
hfunc = getattr(pluginmap[plugpath], operation)
try:
hfunc = getattr(pluginmap[plugpath], operation)
except KeyError:
nodesbyhandler[BadPlugin(node, plugpath).error] = [node]
continue
if hfunc in nodesbyhandler:
nodesbyhandler[hfunc].append(node)
else:
@@ -588,6 +633,14 @@ def handle_node_request(configmanager, inputdata, operation,
# return stripnode(passvalues[0], nodes[0])
def handle_discovery(pathcomponents, operation, configmanager, inputdata):
if pathcomponents[0] == 'detected':
pass
def handle_discovery(pathcomponents, operation, configmanager, inputdata):
if pathcomponents[0] == 'detected':
pass
def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
"""Given a full path request, return an object.
@@ -612,6 +665,14 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
# single node request of some sort
return handle_node_request(configmanager, inputdata,
operation, pathcomponents, autostrip)
elif pathcomponents[0] == 'discovery':
return disco.handle_api_request(
configmanager, inputdata, operation, pathcomponents)
elif pathcomponents[0] == 'networking':
return macmap.handle_api_request(
configmanager, inputdata, operation, pathcomponents)
elif pathcomponents[0] == 'version':
return (msg.Attributes(kv={'version': confluent.__version__}),)
elif pathcomponents[0] == 'users':
# TODO: when non-administrator accounts exist,
# they must only be allowed to see their own user
@@ -646,5 +707,8 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
raise exc.NotFoundException()
if operation == 'update':
return alerts.decode_alert(inputdata, configmanager)
elif pathcomponents[0] == 'discovery':
return handle_discovery(pathcomponents[1:], operation, configmanager,
inputdata)
else:
raise exc.NotFoundException()

View File

@@ -81,6 +81,12 @@ class GlobalConfigError(ConfluentException):
apierrorstr = 'Global configuration contains an error'
class TargetResourceUnavailable(ConfluentException):
# This is meant for scenarios like asking to read a sensor that is
# currently unavailable. This may be a persistent or transient state
apierrorcode = 503
apierrorstr = 'Target Resource Unavailable'
class PubkeyInvalid(ConfluentException):
apierrorcode = 502
apierrorstr = '502 - Invalid certificate or key on target'

View File

@@ -743,6 +743,7 @@ tracelog = None
def log(logdata=None, ltype=None, event=0, eventdata=None):
global globaleventlog
if globaleventlog is None:
globaleventlog = Logger('events')
globaleventlog.log(logdata, ltype, event, eventdata)

View File

@@ -39,6 +39,7 @@ except ImportError:
#On platforms without pwd, give up on the sockapi in general and be http
#only for now
pass
import confluent.discovery.core as disco
import eventlet
dbgif = False
if map(int, (eventlet.__version__.split('.'))) > [0, 18]:
@@ -238,6 +239,7 @@ def run():
sock_bind_host, sock_bind_port = _get_connector_config('socket')
webservice = httpapi.HttpApi(http_bind_host, http_bind_port)
webservice.start()
disco.start_detection()
try:
sockservice = sockapi.SockApi(sock_bind_host, sock_bind_port)
sockservice.start()

View File

@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015-2016 Lenovo
# Copyright 2015-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -194,6 +194,17 @@ class ConfluentNodeError(object):
raise Exception(self.error)
class ConfluentResourceUnavailable(ConfluentNodeError):
apicode = 503
def __init__(self, node, errstr='Unavailable'):
self.node = node
self.error = errstr
def strip_node(self, node):
raise exc.TargetResourceUnavailable()
class ConfluentTargetTimeout(ConfluentNodeError):
apicode = 504
@@ -228,9 +239,19 @@ class ConfluentTargetInvalidCredentials(ConfluentNodeError):
class DeletedResource(ConfluentMessage):
notnode = True
def __init__(self, resource):
self.kvpairs = {}
self.kvpairs = {'deleted': resource}
class CreatedResource(ConfluentMessage):
notnode = True
def __init__(self, resource):
self.kvpairs = {'created': resource}
class AssignedResource(ConfluentMessage):
notnode = True
def __init__(self, resource):
self.kvpairs = {'assigned': resource}
class ConfluentChoiceMessage(ConfluentMessage):
valid_values = set()
@@ -325,9 +346,16 @@ class ChildCollection(LinkRelation):
extension)
# TODO(jjohnson2): enhance the following to support expressions:
# InputNetworkConfiguration
# InputMCI
# InputDomainName
# InputNTPServer
def get_input_message(path, operation, inputdata, nodes=None, multinode=False):
if path[0] == 'power' and path[1] == 'state' and operation != 'retrieve':
return InputPowerMessage(path, nodes, inputdata)
elif path == ['attributes', 'expression']:
return InputExpression(path, inputdata, nodes)
elif path[0] in ('attributes', 'users') and operation != 'retrieve':
return InputAttributes(path, inputdata, nodes)
elif path == ['boot', 'nextdevice'] and operation != 'retrieve':
@@ -387,7 +415,47 @@ class InputAlertData(ConfluentMessage):
return self.alertparams
class InputExpression(ConfluentMessage):
# This is specifically designed to suppress the expansion of an expression
# so that it can make it intact to the pertinent configmanager function
def __init__(self, path, inputdata, nodes=None):
self.nodeattribs = {}
nestedmode = False
if not inputdata:
raise exc.InvalidArgumentException('no request data provided')
if nodes is None:
self.attribs = inputdata
return
for node in nodes:
if node in inputdata:
nestedmode = True
self.nodeattribs[node] = inputdata[node]
if nestedmode:
for key in inputdata:
if key not in nodes:
raise exc.InvalidArgumentException
else:
for node in nodes:
self.nodeattribs[node] = inputdata
def get_attributes(self, node):
if node not in self.nodeattribs:
return {}
nodeattr = deepcopy(self.nodeattribs[node])
return nodeattr
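A hedged illustration of the two input shapes the constructor above accepts; InputAttributes below takes the same shapes but wraps plain strings so a client-supplied expression survives intact (attribute names and values here are illustrative):

# flat form, applied to every node in the request:
#   {'info.note': 'row a'}
# nested form (nestedmode), keyed per node:
#   {'node1': {'info.note': 'row a'}, 'node2': {'info.note': 'row b'}}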
class InputAttributes(ConfluentMessage):
# This is particularly designed for attributes, where a simple string
# should become either a string value or a dict with {'expression':} to
# preserve the client provided expression for posterity, rather than
# immediate consumption.
# for things like node configuration or similar, a different class is
# appropriate since it needs to immediately expand an expression.
# with that class, the 'InputExpression' and calling code in attributes.py
# might be deprecated in favor of the generic expression expander
# and a small function in attributes.py to reflect the expansion back
# to the client
def __init__(self, path, inputdata, nodes=None):
self.nodeattribs = {}
nestedmode = False
@@ -468,12 +536,13 @@ class InputCredential(ConfluentMessage):
if len(path) == 4:
inputdata['uid'] = path[-1]
# if the operation is 'create' check if all fields are present
elif ('uid' not in inputdata or 'privilege_level' not in inputdata or
'username' not in inputdata or 'password' not in inputdata):
raise exc.InvalidArgumentException('all fields are required')
if 'uid' not in inputdata:
raise exc.InvalidArgumentException('uid is missing')
missingattrs = []
for attrname in ('uid', 'privilege_level', 'username', 'password'):
if attrname not in inputdata:
missingattrs.append(attrname)
if missingattrs:
raise exc.InvalidArgumentException(
'Required fields missing: {0}'.format(','.join(missingattrs)))
if (isinstance(inputdata['uid'], str) and
not inputdata['uid'].isdigit()):
raise exc.InvalidArgumentException('uid must be a number')

View File

@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016 Lenovo
# Copyright 2016-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,16 +31,24 @@
# this module will provide mac to switch and full 'ifName' label
# This functionality is restricted to the null tenant
if __name__ == '__main__':
import sys
import confluent.config.configmanager as cfm
import confluent.exceptions as exc
import confluent.log as log
import confluent.messages as msg
import confluent.snmputil as snmp
import confluent.util as util
from eventlet.greenpool import GreenPool
import eventlet
import eventlet.semaphore
import re
_macmap = {}
_macsbyswitch = {}
_nodesbymac = {}
_switchportmap = {}
vintage = None
_whitelistnames = (
@@ -90,7 +98,19 @@ def _namesmatch(switchdesc, userdesc):
def _map_switch(args):
try:
return _map_switch_backend(args)
except UnicodeError:
log.log({'error': "Cannot resolve switch '{0}' to an address".format(
args[0])})
except exc.TargetEndpointUnreachable:
log.log({'error': "Timeout or bad SNMPv1 community string trying to "
"reach switch '{0}'".format(
args[0])})
except exc.TargetEndpointBadCredentials:
log.log({'error': "Bad SNMPv3 credentials for \'{0}\'".format(
args[0])})
except Exception as e:
log.log({'error': 'Unexpected condition trying to reach switch "{0}"'
' check trace log for more'.format(args[0])})
log.logtrace()
@@ -120,7 +140,13 @@ def _map_switch_backend(args):
# fallback if ifName is empty
#
global _macmap
switch, password, user = args
if len(args) == 3:
switch, password, user = args
if not user:
user = None
else:
switch, password = args
user = None
haveqbridge = False
mactobridge = {}
conn = snmp.Session(switch, password, user)
@@ -135,12 +161,24 @@ def _map_switch_backend(args):
)
mactobridge[macaddr] = int(bridgeport)
if not haveqbridge:
raise exc.NotImplementedException('TODO: Bridge-MIB without QBRIDGE')
for vb in conn.walk('1.3.6.1.2.1.17.4.3.1.2'):
oid, bridgeport = vb
if not bridgeport:
continue
oid = str(oid).rsplit('.', 6)
macaddr = '{0:02x}:{1:02x}:{2:02x}:{3:02x}:{4:02x}:{5:02x}'.format(
*([int(x) for x in oid[-6:]])
)
mactobridge[macaddr] = int(bridgeport)
bridgetoifmap = {}
for vb in conn.walk('1.3.6.1.2.1.17.1.4.1.2'):
bridgeport, ifidx = vb
bridgeport = int(str(bridgeport).rsplit('.', 1)[1])
bridgetoifmap[bridgeport] = int(ifidx)
try:
bridgetoifmap[bridgeport] = int(ifidx)
except ValueError:
# ifidx might be '', skip in such a case
continue
ifnamemap = {}
havenames = False
for vb in conn.walk('1.3.6.1.2.1.31.1.1.1.1'):
@@ -156,17 +194,41 @@ def _map_switch_backend(args):
ifidx = int(str(ifidx).rsplit('.', 1)[1])
ifnamemap[ifidx] = str(ifname)
maccounts = {}
bridgetoifvalid = False
for mac in mactobridge:
ifname = ifnamemap[bridgetoifmap[mactobridge[mac]]]
try:
ifname = ifnamemap[bridgetoifmap[mactobridge[mac]]]
bridgetoifvalid = True
except KeyError:
continue
if ifname not in maccounts:
maccounts[ifname] = 1
else:
maccounts[ifname] += 1
if not bridgetoifvalid:
bridgetoifmap = {}
# Not a single mac address resolved to an interface index, chances are
# that the switch is broken, and the mactobridge is reporting ifidx
# instead of bridge port index
# try again, skipping the bridgetoifmap lookup
for mac in mactobridge:
try:
ifname = ifnamemap[mactobridge[mac]]
bridgetoifmap[mactobridge[mac]] = mactobridge[mac]
except KeyError:
continue
if ifname not in maccounts:
maccounts[ifname] = 1
else:
maccounts[ifname] += 1
_macsbyswitch[switch] = {}
for mac in mactobridge:
# We want to merge it so that when a mac appears in multiple
# places, it is captured.
ifname = ifnamemap[bridgetoifmap[mactobridge[mac]]]
try:
ifname = ifnamemap[bridgetoifmap[mactobridge[mac]]]
except KeyError:
continue
if mac in _macmap:
_macmap[mac].append((switch, ifname, maccounts[ifname]))
else:
@@ -178,14 +240,34 @@ def _map_switch_backend(args):
nodename = _nodelookup(switch, ifname)
if nodename is not None:
if mac in _nodesbymac and _nodesbymac[mac] != nodename:
log.log({'warning': '{0} and {1} described by ambiguous'
# For example, listed on both a real edge port
# and by accident a trunk port
log.log({'error': '{0} and {1} described by ambiguous'
' switch topology values'.format(nodename,
_nodesbymac[mac]
)})
_nodesbymac[mac] = nodename
_nodesbymac[mac] = None
else:
_nodesbymac[mac] = nodename
def update_macmap(configmanager):
def find_node_by_mac(mac, configmanager):
now = util.monotonic_time()
if vintage and (now - vintage) < 90 and mac in _nodesbymac:
return _nodesbymac[mac]
# do not actually sweep switches more than once every 30 seconds
# however, if there is an update in progress, wait on it
for _ in update_macmap(configmanager, vintage and (now - vintage) < 30):
if mac in _nodesbymac:
return _nodesbymac[mac]
# If update_macmap bailed out, still check one last time
return _nodesbymac.get(mac, None)
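A hypothetical caller of the lookup above, e.g. from discovery handling: results newer than 90 seconds come straight from cache, and a fresh sweep is skipped entirely if one ran within the last 30 seconds:

node = find_node_by_mac('00:1a:2b:3c:4d:5e', configmanager)  # example MAC
if node is None:
    pass  # MAC not yet attributable to any configured switch port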
mapupdating = eventlet.semaphore.Semaphore()
def update_macmap(configmanager, impatient=False):
"""Interrogate switches to build/update mac table
Begin a rebuild process. This process is a generator that will yield
@@ -193,57 +275,205 @@ def update_macmap(configmanager):
recheck the cache as results become possible, rather
than having to wait for the process to complete to interrogate.
"""
if mapupdating.locked():
while mapupdating.locked():
eventlet.sleep(1)
yield None
return
if impatient:
return
completions = _full_updatemacmap(configmanager)
for completion in completions:
try:
yield completion
except GeneratorExit:
# the calling function has stopped caring, but we want to finish
# the sweep, background it
eventlet.spawn_n(_finish_update, completions)
raise
def _finish_update(completions):
for _ in completions:
pass
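A minimal sketch of the GeneratorExit pattern used above, in isolation; do_item stands in for the per-switch work:

import eventlet

def _finish(gen):
    for _ in gen:
        pass

def sweep(items):
    inner = (do_item(x) for x in items)  # do_item: hypothetical per-item work
    for result in inner:
        try:
            yield result
        except GeneratorExit:
            # the caller stopped iterating; finish the rest in the background
            eventlet.spawn_n(_finish, inner)
            raise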
def _full_updatemacmap(configmanager):
global vintage
global _macmap
global _nodesbymac
global _switchportmap
# Clear all existing entries
_macmap = {}
_nodesbymac = {}
_switchportmap = {}
if configmanager.tenant is not None:
raise exc.ForbiddenRequest('Network topology not available to tenants')
nodelocations = configmanager.get_node_attributes(
configmanager.list_nodes(), ('hardwaremanagement.switch',
'hardwaremanagement.switchport'))
switches = set([])
for node in nodelocations:
cfg = nodelocations[node]
if 'hardwaremanagement.switch' in cfg:
curswitch = cfg['hardwaremanagement.switch']['value']
switches.add(curswitch)
if 'hardwaremanagement.switchport' in cfg:
portname = cfg['hardwaremanagement.switchport']['value']
if curswitch not in _switchportmap:
_switchportmap[curswitch] = {}
if portname in _switchportmap[curswitch]:
log.log({'warning': 'Duplicate switch topology config for '
'{0} and {1}'.format(node,
_switchportmap[
curswitch][
portname])})
_switchportmap[curswitch][portname] = node
switchcfg = configmanager.get_node_attributes(
switches, ('secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword'), decrypt=True)
switchauth = []
for switch in switches:
password = 'public'
user = None
if (switch in switchcfg and
'secret.hardwaremanagementpassword' in switchcfg[switch]):
password = switchcfg[switch]['secret.hardwaremanagementpassword'][
'value']
if 'secret.hardwaremanagementuser' in switchcfg[switch]:
user = switchcfg[switch]['secret.hardwaremanagementuser'][
'value']
switchauth.append((switch, password, user))
pool = GreenPool()
for res in pool.imap(_map_switch, switchauth):
yield res
print(repr(_macmap))
global _macsbyswitch
with mapupdating:
vintage = util.monotonic_time()
# Clear all existing entries
_macmap = {}
_nodesbymac = {}
_switchportmap = {}
_macsbyswitch = {}
if configmanager.tenant is not None:
raise exc.ForbiddenRequest(
'Network topology not available to tenants')
nodelocations = configmanager.get_node_attributes(
configmanager.list_nodes(), ('net*.switch', 'net*.switchport'))
switches = set([])
for node in nodelocations:
cfg = nodelocations[node]
for attr in cfg:
if not attr.endswith('.switch') or 'value' not in cfg[attr]:
continue
curswitch = cfg[attr].get('value', None)
if not curswitch:
continue
switches.add(curswitch)
switchportattr = attr + 'port'
if switchportattr in cfg:
portname = cfg[switchportattr].get('value', '')
if not portname:
continue
if curswitch not in _switchportmap:
_switchportmap[curswitch] = {}
if portname in _switchportmap[curswitch]:
log.log({'error': 'Duplicate switch topology config '
'for {0} and {1}'.format(
node,
_switchportmap[curswitch][
portname])})
_switchportmap[curswitch][portname] = None
else:
_switchportmap[curswitch][portname] = node
switchcfg = configmanager.get_node_attributes(
switches, ('secret.hardwaremanagementuser', 'secret.snmpcommunity',
'secret.hardwaremanagementpassword'), decrypt=True)
switchauth = []
for switch in switches:
if not switch:
continue
switchparms = switchcfg.get(switch, {})
user = None
password = switchparms.get(
'secret.snmpcommunity', {}).get('value', None)
if not password:
password = switchparms.get(
'secret.hardwaremanagementpassword', {}).get('value',
'public')
user = switchparms.get(
'secret.hardwaremanagementuser', {}).get('value', None)
switchauth.append((switch, password, user))
pool = GreenPool()
for ans in pool.imap(_map_switch, switchauth):
vintage = util.monotonic_time()
yield ans
def _dump_locations(info, macaddr, nodename=None):
yield msg.KeyValueData({'possiblenode': nodename, 'mac': macaddr})
retdata = {}
portinfo = []
for location in info:
portinfo.append({'switch': location[0],
'port': location[1], 'macsonport': location[2]})
retdata['ports'] = sorted(portinfo, key=lambda x: x['macsonport'],
reverse=True)
yield msg.KeyValueData(retdata)
def handle_api_request(configmanager, inputdata, operation, pathcomponents):
if operation == 'retrieve':
return handle_read_api_request(pathcomponents)
if (operation in ('update', 'create') and
pathcomponents == ['networking', 'macs', 'rescan']):
if inputdata != {'rescan': 'start'}:
raise exc.InvalidArgumentException()
eventlet.spawn_n(rescan, configmanager)
return [msg.KeyValueData({'rescan': 'started'})]
raise exc.NotImplementedException(
'Operation {0} on {1} not implemented'.format(
operation, '/'.join(pathcomponents)))
def handle_read_api_request(pathcomponents):
# TODO(jjohnson2): discovery core.py api handler design, apply it here
# to make this a less tangled mess as it gets extended
if len(pathcomponents) == 1:
return [msg.ChildCollection('macs/')]
elif len(pathcomponents) == 2:
return [msg.ChildCollection(x) for x in (# 'by-node/',
'by-mac/', 'by-switch/',
'rescan')]
if False and pathcomponents[2] == 'by-node':
# TODO: should be list of node names, and then under that 'by-mac'
if len(pathcomponents) == 3:
return [msg.ChildCollection(x.replace(':', '-'))
for x in sorted(list(_nodesbymac))]
elif len(pathcomponents) == 4:
macaddr = pathcomponents[-1].replace('-', ':')
return dump_macinfo(macaddr)
elif pathcomponents[2] == 'by-mac':
if len(pathcomponents) == 3:
return [msg.ChildCollection(x.replace(':', '-'))
for x in sorted(list(_macmap))]
elif len(pathcomponents) == 4:
return dump_macinfo(pathcomponents[-1])
elif pathcomponents[2] == 'by-switch':
if len(pathcomponents) == 3:
return [msg.ChildCollection(x + '/')
for x in sorted(list(_macsbyswitch))]
if len(pathcomponents) == 4:
return [msg.ChildCollection('by-port/')]
if len(pathcomponents) == 5:
switchname = pathcomponents[-2]
if switchname not in _macsbyswitch:
raise exc.NotFoundException(
'No known macs for switch {0}'.format(switchname))
return [msg.ChildCollection(x.replace('/', '-') + '/')
for x in sorted(list(_macsbyswitch[switchname]))]
if len(pathcomponents) == 6:
return [msg.ChildCollection('by-mac/')]
if len(pathcomponents) == 7:
switchname = pathcomponents[-4]
portname = pathcomponents[-2]
try:
if portname not in _macsbyswitch[switchname]:
portname = portname.replace('-', '/')
maclist = _macsbyswitch[switchname][portname]
except KeyError:
raise exc.NotFoundException('No known macs for switch {0} '
'port {1}'.format(switchname,
portname))
return [msg.ChildCollection(x.replace(':', '-'))
for x in sorted(maclist)]
if len(pathcomponents) == 8:
return dump_macinfo(pathcomponents[-1])
raise exc.NotFoundException('Unrecognized path {0}'.format(
'/'.join(pathcomponents)))
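Putting the branches above together, the read-only tree under /networking/macs comes out roughly like this (switch, port and MAC values are examples; note the '/'-to-'-' substitution in port names):

# /networking/macs/                       -> by-mac/  by-switch/  rescan
# /networking/macs/by-mac/00-1a-2b-3c-4d-5e
# /networking/macs/by-switch/switch1/by-port/Ethernet1-12/by-mac/00-1a-2b-3c-4d-5e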
def dump_macinfo(macaddr):
macaddr = macaddr.replace('-', ':')
info = _macmap.get(macaddr, None)
if info is None:
raise exc.NotFoundException(
'{0} not found in mac table of '
'any known switches'.format(macaddr))
return _dump_locations(info, macaddr, _nodesbymac.get(macaddr, None))
def rescan(cfg):
for _ in update_macmap(cfg):
pass
if __name__ == '__main__':
# invoke as switch community
import sys
_map_switch(sys.argv[1], sys.argv[2])
cg = cfm.ConfigManager(None)
for res in update_macmap(cg):
print("map has updated")
if len(sys.argv) > 1:
print(repr(_macmap[sys.argv[1]]))
print(repr(_nodesbymac[sys.argv[1]]))
else:
print("Mac to Node lookup table: -------------------")
print(repr(_nodesbymac))
print("Mac to location lookup table: -------------------")
print(repr(_macmap))
print("switch to fdb lookup table: -------------------")
print(repr(_macsbyswitch))

View File

@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
# Copyright 2015-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -170,6 +170,17 @@ class NodeRange(object):
def _expandstring(self, element, filternodes=None):
prefix = ''
if element[0][0] in ('/', '~'):
element = ''.join(element)
nameexpression = element[1:]
if self.cfm is None:
raise Exception('Verification configmanager required')
return set(self.cfm.filter_nodenames(nameexpression, filternodes))
elif '=' in element[0] or '!~' in element[0]:
element = ''.join(element)
if self.cfm is None:
raise Exception('Verification configmanager required')
return set(self.cfm.filter_node_attributes(element, filternodes))
for idx in xrange(len(element)):
if element[idx][0] == '[':
nodes = set([])
@@ -191,19 +202,10 @@ class NodeRange(object):
nodes |= NodeRange(
grpcfg['noderange']['value'], self.cfm).nodes
return nodes
if '-' in element and ':' not in element:
return self.expandrange(element, '-')
elif ':' in element: # : range for less ambiguity
if ':' in element: # : range for less ambiguity
return self.expandrange(element, ':')
elif '=' in element or '!~' in element:
if self.cfm is None:
raise Exception('Verification configmanager required')
return set(self.cfm.filter_node_attributes(element, filternodes))
elif element[0] in ('/', '~'):
nameexpression = element[1:]
if self.cfm is None:
raise Exception('Verification configmanager required')
return set(self.cfm.filter_nodenames(nameexpression, filternodes))
elif '-' in element:
return self.expandrange(element, '-')
elif '+' in element:
element, increment = element.split('+')
try:
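A few hedged examples of the element forms handled above; the expression forms need a configmanager and depend on the configuration database:

import confluent.noderange as noderange

noderange.NodeRange('n1:n4').nodes                      # numeric range: n1..n4
# noderange.NodeRange('~web.*', cfm).nodes              # node name regex
# noderange.NodeRange('hardwaremanagement.method=ipmi', cfm).nodes  # attribute filter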

View File

@@ -28,14 +28,16 @@ def retrieve(nodes, element, configmanager, inputdata):
def retrieve_nodegroup(nodegroup, element, configmanager, inputdata):
grpcfg = configmanager.get_nodegroup_attributes(nodegroup)
if element == 'all':
nodes = []
if 'nodes' in grpcfg:
nodes = list(grpcfg['nodes'])
yield msg.ListAttributes(kv={'nodes': nodes},
desc="The nodes belonging to this group")
for attribute in sorted(allattributes.node.iterkeys()):
theattrs = set(allattributes.node).union(set(grpcfg))
theattrs.add('nodes')
for attribute in sorted(theattrs):
if attribute == 'groups':
continue
if attribute == 'nodes':
yield msg.ListAttributes(
kv={'nodes': list(grpcfg.get('nodes', []))},
desc="The nodes belonging to this group")
continue
if attribute in grpcfg:
val = grpcfg[attribute]
else:
@@ -45,13 +47,17 @@ def retrieve_nodegroup(nodegroup, element, configmanager, inputdata):
kv={attribute: val},
desc=allattributes.node[attribute]['description'])
elif isinstance(val, list):
raise Exception("TODO")
yield msg.ListAttributes(
kv={attribute: val},
desc=allattributes.node.get(
attribute, {}).get('description', ''))
else:
yield msg.Attributes(
kv={attribute: val},
desc=allattributes.node[attribute]['description'])
desc=allattributes.node.get(attribute, {}).get(
'description', ''))
if element == 'current':
for attribute in sorted(grpcfg.iterkeys()):
for attribute in sorted(list(grpcfg)):
currattr = grpcfg[attribute]
if attribute == 'nodes':
desc = 'The nodes belonging to this group'
@@ -61,7 +67,7 @@ def retrieve_nodegroup(nodegroup, element, configmanager, inputdata):
try:
desc = allattributes.node[attribute]['description']
except KeyError:
desc = 'Unknown'
desc = ''
if 'value' in currattr or 'expression' in currattr:
yield msg.Attributes(kv={attribute: currattr}, desc=desc)
elif 'cryptvalue' in currattr:
@@ -86,7 +92,8 @@ def retrieve_nodes(nodes, element, configmanager, inputdata):
attributes = configmanager.get_node_attributes(nodes)
if element[-1] == 'all':
for node in nodes:
for attribute in sorted(allattributes.node.iterkeys()):
theattrs = set(allattributes.node).union(set(attributes[node]))
for attribute in sorted(theattrs):
if attribute in attributes[node]: # have a setting for it
val = attributes[node][attribute]
elif attribute == 'groups': # no setting, provide a blank
@@ -96,23 +103,26 @@ def retrieve_nodes(nodes, element, configmanager, inputdata):
if attribute.startswith('secret.'):
yield msg.CryptedAttributes(
node, {attribute: val},
allattributes.node[attribute]['description'])
allattributes.node.get(
attribute, {}).get('description', ''))
elif isinstance(val, list):
yield msg.ListAttributes(
node, {attribute: val},
allattributes.node[attribute]['description'])
allattributes.node.get(
attribute, {}).get('description', ''))
else:
yield msg.Attributes(
node, {attribute: val},
allattributes.node[attribute]['description'])
allattributes.node.get(
attribute, {}).get('description', ''))
elif element[-1] == 'current':
for node in attributes.iterkeys():
for node in sorted(list(attributes)):
for attribute in sorted(attributes[node].iterkeys()):
currattr = attributes[node][attribute]
try:
desc = allattributes.node[attribute]['description']
except KeyError:
desc = 'Unknown'
desc = ''
if 'value' in currattr or 'expression' in currattr:
yield msg.Attributes(node, {attribute: currattr}, desc)
elif 'cryptvalue' in currattr:

View File

@@ -36,7 +36,8 @@ console.session.socket.getaddrinfo = eventlet.support.greendns.getaddrinfo
def exithandler():
console.session.iothread.join()
if console.session.iothread is not None:
console.session.iothread.join()
atexit.register(exithandler)
@@ -52,6 +53,15 @@ sensor_categories = {
}
class EmptySensor(object):
def __init__(self, name):
self.name = name
self.value = None
self.states = ['Unavailable']
self.units = None
self.health = 'ok'
def hex2bin(hexstring):
hexvals = hexstring.split(':')
if len(hexvals) < 2:
@@ -300,7 +310,6 @@ def perform_requests(operator, nodes, element, cfg, inputdata):
pass
def perform_request(operator, node, element,
configdata, inputdata, cfg, results):
try:
@@ -361,7 +370,7 @@ class IpmiHandler(object):
ipmisess.wait_for_rsp(180)
if not (self.broken or self.loggedin):
raise exc.TargetEndpointUnreachable(
"Login process to " + bmc + " died")
"Login process to " + connparams['bmc'] + " died")
except socket.gaierror as ge:
if ge[0] == -2:
raise exc.TargetEndpointUnreachable(ge[1])
@@ -599,29 +608,31 @@ class IpmiHandler(object):
self.sensormap[simplify_name(resourcename)] = resourcename
def read_sensors(self, sensorname):
try:
if sensorname == 'all':
sensors = self.ipmicmd.get_sensor_descriptions()
readings = []
for sensor in filter(self.match_sensor, sensors):
try:
reading = self.ipmicmd.get_sensor_reading(
sensor['name'])
except pygexc.IpmiException as ie:
if ie.ipmicode == 203:
continue
raise
if hasattr(reading, 'health'):
reading.health = _str_health(reading.health)
readings.append(reading)
self.output.put(msg.SensorReadings(readings, name=self.node))
else:
self.make_sensor_map()
if sensorname not in self.sensormap:
self.output.put(
msg.ConfluentTargetNotFound(self.node,
'Sensor not found'))
return
if sensorname == 'all':
sensors = self.ipmicmd.get_sensor_descriptions()
readings = []
for sensor in filter(self.match_sensor, sensors):
try:
reading = self.ipmicmd.get_sensor_reading(
sensor['name'])
except pygexc.IpmiException as ie:
if ie.ipmicode == 203:
self.output.put(msg.SensorReadings([EmptySensor(
sensor['name'])], name=self.node))
continue
raise
if hasattr(reading, 'health'):
reading.health = _str_health(reading.health)
readings.append(reading)
self.output.put(msg.SensorReadings(readings, name=self.node))
else:
self.make_sensor_map()
if sensorname not in self.sensormap:
self.output.put(
msg.ConfluentTargetNotFound(self.node,
'Sensor not found'))
return
try:
reading = self.ipmicmd.get_sensor_reading(
self.sensormap[sensorname])
if hasattr(reading, 'health'):
@@ -629,8 +640,13 @@ class IpmiHandler(object):
self.output.put(
msg.SensorReadings([reading],
name=self.node))
except pygexc.IpmiException:
self.output.put(msg.ConfluentTargetTimeout(self.node))
except pygexc.IpmiException as ie:
if ie.ipmicode == 203:
self.output.put(msg.ConfluentResourceUnavailable(
self.node, 'Unavailable'
))
else:
self.output.put(msg.ConfluentTargetTimeout(self.node))
def list_inventory(self):
try:

View File

@@ -24,6 +24,7 @@
import confluent.exceptions as exc
import eventlet
from eventlet.support.greendns import getaddrinfo
import pysnmp.smi.error as snmperr
import socket
snmp = eventlet.import_patched('pysnmp.hlapi')
@@ -85,14 +86,22 @@ class Session(object):
walking = snmp.bulkCmd(self.eng, self.authdata, tp, ctx, 0, 10, obj,
lexicographicMode=False)
for rsp in walking:
errstr, errnum, erridx, answers = rsp
if errstr:
raise exc.TargetEndpointUnreachable(str(errstr))
elif errnum:
raise exc.ConfluentException(errnum.prettyPrint())
for ans in answers:
yield ans
try:
for rsp in walking:
errstr, errnum, erridx, answers = rsp
if errstr:
errstr = str(errstr)
if errstr in ('unknownUserName', 'wrongDigest'):
raise exc.TargetEndpointBadCredentials(errstr)
# need to do bad credential versus timeout
raise exc.TargetEndpointUnreachable(errstr)
elif errnum:
raise exc.ConfluentException(errnum.prettyPrint())
for ans in answers:
yield ans
except snmperr.WrongValueError:
raise exc.TargetEndpointBadCredentials('Invalid SNMPv3 password')
if __name__ == '__main__':
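A hypothetical use of the Session wrapper above, showing how the new error translation surfaces to a caller; the OID and arguments are examples:

import confluent.exceptions as exc
import confluent.snmputil as snmp

def dump_bridge_fdb(switch, secret, user=None):
    conn = snmp.Session(switch, secret, user)
    try:
        for oid, value in conn.walk('1.3.6.1.2.1.17.4.3.1.2'):  # dot1dTpFdbPort
            print('{0} = {1}'.format(oid, value))
    except exc.TargetEndpointBadCredentials:
        print('bad SNMP credentials for ' + switch)
    except exc.TargetEndpointUnreachable:
        print('timeout or bad community for ' + switch)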

View File

@@ -234,8 +234,16 @@ def start_term(authname, cfm, connection, params, path, authdata, skipauth):
consession.reopen()
continue
else:
process_request(connection, data, cfm, authdata, authname,
skipauth)
try:
process_request(connection, data, cfm, authdata, authname,
skipauth)
except Exception:
tracelog.log(traceback.format_exc(),
ltype=log.DataTypes.event,
event=log.Events.stacktrace)
send_data(connection, {'errorcode': 500,
'error': 'Unexpected error'})
send_data(connection, {'_requestdone': 1})
continue
if not data:
consession.destroy()

View File

@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
# Copyright 2015-2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,10 +20,36 @@ import base64
import confluent.exceptions as cexc
import confluent.log as log
import hashlib
import netifaces
import os
import struct
def list_interface_indexes():
# Getting the interface indexes in a portable manner
# would be better, but there's difficulty from a python perspective.
# For now be linux specific
try:
for iface in os.listdir('/sys/class/net/'):
ifile = open('/sys/class/net/{0}/ifindex'.format(iface), 'r')
intidx = int(ifile.read())
ifile.close()
yield intidx
except (IOError, OSError):
# Probably situation is non-Linux, just do limited support for
# such platforms until other people come along
return
def list_ips():
# Used for getting addresses to indicate the multicast address
# as well as getting all the broadcast addresses
for iface in netifaces.interfaces():
addrs = netifaces.ifaddresses(iface)
if netifaces.AF_INET in addrs:
for addr in addrs[netifaces.AF_INET]:
yield addr
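A trivial, illustrative use of the two helpers above, presumably what the new discovery code needs for multicast listening and address advertisement:

for idx in list_interface_indexes():
    print('could bind multicast discovery to interface index {0}'.format(idx))
for addr in list_ips():
    print('local address {0}, broadcast {1}'.format(
        addr.get('addr'), addr.get('broadcast')))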
def randomstring(length=20):
"""Generate a random string of requested length
@@ -61,6 +87,23 @@ def monotonic_time():
# for now, just support POSIX systems
return os.times()[4]
def get_fingerprint(certificate, algo='sha512'):
if algo != 'sha512':
raise Exception("TODO: Non-sha512")
return 'sha512$' + hashlib.sha512(certificate).hexdigest()
def cert_matches(fingerprint, certificate):
if not fingerprint or not certificate:
return False
algo, _, fp = fingerprint.partition('$')
newfp = None
if algo == 'sha512':
newfp = get_fingerprint(certificate)
return newfp and fingerprint == newfp
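A small hedged example of how the two helpers above pair up; the certificate bytes would be whatever DER blob the remote endpoint presented (the path is hypothetical):

der = open('/tmp/peer-cert.der', 'rb').read()
fp = get_fingerprint(der)            # 'sha512$<hexdigest>'
assert cert_matches(fp, der)
assert not cert_matches(fp, der + b'tampered')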
class TLSCertVerifier(object):
def __init__(self, configmanager, node, fieldname):
self.cfm = configmanager
@@ -68,7 +111,7 @@ class TLSCertVerifier(object):
self.fieldname = fieldname
def verify_cert(self, certificate):
fingerprint = 'sha512$' + hashlib.sha512(certificate).hexdigest()
fingerprint = get_fingerprint(certificate)
storedprint = self.cfm.get_node_attributes(self.node, (self.fieldname,)
)
if self.fieldname not in storedprint[self.node]: # no stored value, check

View File

@@ -12,7 +12,7 @@ Group: Development/Libraries
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
Prefix: %{_prefix}
BuildArch: noarch
Requires: python-pyghmi, python-eventlet, python-greenlet, python-crypto >= 2.6.1, confluent_client, pyparsing, python-paramiko, python-dns
Requires: python-pyghmi, python-eventlet, python-greenlet, python-crypto >= 2.6.1, confluent_client, pyparsing, python-paramiko, python-dns, python-netifaces, python2-pyasn1, python-pysnmp, python-pyte
Vendor: Jarrod Johnson <jjohnson2@lenovo.com>
Url: http://xcat.sf.net/
@@ -34,7 +34,8 @@ grep -v confluent/__init__.py INSTALLED_FILES.bare > INSTALLED_FILES
cat INSTALLED_FILES
%post
if [ -x /usr/bin/systemctl ]; then /usr/bin/systemctl try-restart confluent; fi
if [ -x /usr/bin/systemctl ]; then /usr/bin/systemctl try-restart confluent >& /dev/null; fi
true
%clean
rm -rf $RPM_BUILD_ROOT

View File

@@ -1,5 +1,18 @@
#!/usr/bin/env python
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

View File

@@ -6,3 +6,4 @@ if [ "$NUMCOMMITS" != "$VERSION" ]; then
fi
echo $VERSION > VERSION
sed -e "s/#VERSION#/$VERSION/" setup.py.tmpl > setup.py
echo '__version__ = "'$VERSION'"' > confluent/__init__.py

View File

@@ -9,6 +9,10 @@ setup(
url='http://xcat.sf.net/',
description='confluent systems management server',
packages=['confluent', 'confluent/config', 'confluent/interface',
'confluent/discovery/',
'confluent/discovery/protocols/',
'confluent/discovery/handlers/',
'confluent/networking/',
'confluent/plugins/hardwaremanagement/',
'confluent/plugins/shell/',
'confluent/plugins/configuration/'],