Mirror of https://github.com/xcat2/confluent.git (synced 2024-11-22 09:32:21 +00:00)

Commit 5b52582302: Merge branch 'master' of github.com:jjohnson42/confluent
@@ -115,6 +115,7 @@ def print_help():


 def updatestatus(stateinfo={}):
+    global powerstate, powertime
     status = consolename
     info = []
     for statekey in stateinfo:

@@ -129,6 +130,11 @@ def updatestatus(stateinfo={}):
         # error will be repeated if relevant
         # avoid keeping it around as stale
         del laststate['error']
+    if 'state' in stateinfo: # currently only read power means anything
+        newpowerstate = stateinfo['state']['value']
+        if newpowerstate != powerstate and newpowerstate == 'off':
+            sys.stdout.write("\x1b[2J\x1b[;H[powered off]\r\n")
+        powerstate = newpowerstate
     if 'clientcount' in laststate and laststate['clientcount'] != 1:
         info.append('clients: %d' % laststate['clientcount'])
     if 'bufferage' in stateinfo and stateinfo['bufferage'] is not None:
@@ -715,8 +721,23 @@ def conserver_command(filehandle, localcommand):
             else:
                 print("Unknown power state.]\r")

-        #check_power_state()
+        check_power_state()
+    elif localcommand[0] == 'r':
+        sys.stdout.write('\x1b7\x1b[999;999H\x1b[6n')
+        sys.stdout.flush()
+        reply = ''
+        while 'R' not in reply:
+            try:
+                reply += sys.stdin.read(1)
+            except IOError:
+                pass
+        reply = reply.replace('\x1b[', '')
+        reply = reply.replace('R', '')
+        height, width = reply.split(';')
+        sys.stdout.write('\x1b8')
+        sys.stdout.flush()
+        print('sending stty commands]')
+        return 'stty columns {0}\rstty rows {1}\r'.format(width, height)
     elif localcommand[0] == '?':
         print("help]\r")
         print(". exit console\r")

@@ -727,6 +748,7 @@ def conserver_command(filehandle, localcommand):
         print("pbs boot to setup\r")
         print("pbn boot to network\r")
         print("pb<ent> boot to default\r")
+        print("r send stty command to resize terminal\r")
         print("<cr> abort command\r")
     elif localcommand[0] == '\x0d':
         print("ignored]\r")
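Note on the escape sequences in the new 'r' handler: '\x1b7' saves the cursor position, '\x1b[999;999H' pushes the cursor toward the bottom-right corner, and '\x1b[6n' asks the terminal to report where the cursor landed; the reply arrives on stdin as something like '\x1b[52;211R' (row;column). A minimal standalone sketch of just the reply parsing, not part of the commit, with a made-up reply string:

# Parse a cursor-position report such as '\x1b[52;211R' into stty arguments.
reply = '\x1b[52;211R'   # example reply; the real script reads this from stdin
reply = reply.replace('\x1b[', '')
reply = reply.replace('R', '')
height, width = reply.split(';')
print('stty columns {0}\rstty rows {1}\r'.format(width, height))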
@@ -830,21 +852,11 @@ powertime = None


 def check_power_state():
-    global powerstate, powertime
-    for rsp in session.read('/nodes/' + consolename + '/power/state'):
-        if type(rsp) == dict and 'state' in rsp:
-            newpowerstate = rsp['state']['value']
-            powertime = time.time()
-            if newpowerstate != powerstate and newpowerstate == 'off':
-                sys.stdout.write("\x1b[2J\x1b[;H[powered off]\r\n")
-            powerstate = newpowerstate
-        elif type(rsp) == dict and '_requestdone' in rsp:
-            break
-        elif type(rsp) == dict:
-            updatestatus(rsp)
-        else:
-            sys.stdout.write(rsp)
-            sys.stdout.flush()
+    tlvdata.send(
+        session.connection,
+        {'operation': 'retrieve',
+         'path': '/nodes/' + consolename + '/power/state'})
+    return


 while inconsole or not doexit:
@@ -908,8 +920,10 @@ while inconsole or not doexit:
             tlvdata.send(session.connection, myinput)
         except IOError:
             pass
-        #if powerstate is None or powertime < time.time() - 60: # Check powerstate every 60 seconds
-        #    check_power_state()
+        if powerstate is None or powertime < time.time() - 60: # Check powerstate every 60 seconds
+            powertime = time.time()
+            powerstate = True
+            check_power_state()
     else:
         currcommand = prompt()
         try:
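The previous hunks move the power-state check off the blocking path: check_power_state() now only queues a retrieve request, and the main loop re-issues it at most once per 60 seconds, setting powerstate to a throwaway True so the "is None" branch does not fire again before a reply arrives. A minimal standalone sketch of that throttle idiom, not part of the commit (send_request stands in for the tlvdata call):

import time

powerstate = None   # last reported state, or None before the first check
powertime = None    # when the last request was issued

def maybe_check_power_state(send_request):
    """Issue a non-blocking power-state request at most once per minute."""
    global powerstate, powertime
    if powerstate is None or powertime < time.time() - 60:
        powertime = time.time()
        powerstate = True   # placeholder until the real reply overwrites it
        send_request()

maybe_check_power_state(lambda: print('request sent'))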
@@ -34,21 +34,24 @@ if path.startswith('/opt'):
     sys.path.append(path)

 import confluent.client as client
+import confluent.sortutil as sortutil


 def run():
-    concurrentprocs = 168
-    # among other things, FD_SETSIZE limits. Besides, spawning too many
-    # processes can be unkind for the unaware on memory pressure and such...
     argparser = optparse.OptionParser(
-        usage="Usage: %prog node commandexpression",
+        usage="Usage: %prog noderange commandexpression",
         epilog="Expressions are the same as in attributes, e.g. "
                "'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
+    argparser.add_option('-c', '--count', type='int', default=168,
+                         help='Number of commands to run at a time')
+    # among other things, FD_SETSIZE limits. Besides, spawning too many
+    # processes can be unkind for the unaware on memory pressure and such...
     argparser.disable_interspersed_args()
     (options, args) = argparser.parse_args()
     if len(args) < 2:
         argparser.print_help()
         sys.exit(1)
+    concurrentprocs = options.count
     c = client.Command()
     cmdstr = " ".join(args[1:])
@@ -76,26 +79,34 @@ def run():
         sys.exit(exitcode)
     rdy, _, _ = select.select(all, [], [], 10)
     while all:
+        pernodeout = {}
         for r in rdy:
-            data = r.readline()
             desc = pipedesc[r]
-            if data:
-                node = desc['node']
-                if desc['type'] == 'stdout':
-                    sys.stdout.write('{0}: {1}'.format(node,data))
-                    sys.stdout.flush()
+            node = desc['node']
+            data = True
+            while data and select.select([r], [], [], 0):
+                data = r.readline()
+                if data:
+                    if desc['type'] == 'stdout':
+                        if node not in pernodeout:
+                            pernodeout[node] = []
+                        pernodeout[node].append(data)
+                    else:
+                        sys.stderr.write('{0}: {1}'.format(node, data))
+                        sys.stderr.flush()
                 else:
-                    sys.stderr.write('{0}: {1}'.format(node, data))
-                    sys.stderr.flush()
-            else:
-                pop = desc['popen']
-                ret = pop.poll()
-                if ret is not None:
-                    exitcode = exitcode | ret
-                    all.discard(r)
-                    if desc['type'] == 'stdout' and pendingexecs:
-                        node, cmdv = pendingexecs.popleft()
-                        run_cmdv(node, cmdv, all, pipedesc)
+                    pop = desc['popen']
+                    ret = pop.poll()
+                    if ret is not None:
+                        exitcode = exitcode | ret
+                        all.discard(r)
+                        if desc['type'] == 'stdout' and pendingexecs:
+                            node, cmdv = pendingexecs.popleft()
+                            run_cmdv(node, cmdv, all, pipedesc)
+        for node in sortutil.natural_sort(pernodeout):
+            for line in pernodeout[node]:
+                sys.stdout.write('{0}: {1}'.format(node, line))
+        sys.stdout.flush()
         if all:
             rdy, _, _ = select.select(all, [], [], 10)
     sys.exit(exitcode)
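The rewritten loop above drains each ready pipe, buffers stdout lines per node in pernodeout, and only then prints them grouped by node in natural order, instead of interleaving lines in arrival order. A simplified standalone illustration of that output stage, not part of the commit; the arrival data is invented and confluent.sortutil is assumed to be importable:

import sys
import confluent.sortutil as sortutil

# Lines as they might arrive, interleaved across nodes.
arrivals = [('n10', 'done\n'), ('n2', 'step 1\n'), ('n10', 'ok\n'), ('n2', 'step 2\n')]

pernodeout = {}
for node, line in arrivals:
    pernodeout.setdefault(node, []).append(line)

# Emit grouped per node, in natural node order (n2 before n10).
for node in sortutil.natural_sort(pernodeout):
    for line in pernodeout[node]:
        sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()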
@@ -33,21 +33,25 @@ if path.startswith('/opt'):
     sys.path.append(path)

 import confluent.client as client
+import confluent.sortutil as sortutil


 def run():
-    concurrentprocs = 168
-    # among other things, FD_SETSIZE limits. Besides, spawning too many
-    # processes can be unkind for the unaware on memory pressure and such...
+
     argparser = optparse.OptionParser(
-        usage="Usage: %prog node commandexpression",
+        usage="Usage: %prog noderange commandexpression",
         epilog="Expressions are the same as in attributes, e.g. "
                "'ipmitool -H {hardwaremanagement.manager}' will be expanded.")
+    argparser.add_option('-c', '--count', type='int', default=168,
+                         help='Number of commands to run at a time')
+    # among other things, FD_SETSIZE limits. Besides, spawning too many
+    # processes can be unkind for the unaware on memory pressure and such...
     argparser.disable_interspersed_args()
     (options, args) = argparser.parse_args()
     if len(args) < 2:
         argparser.print_help()
         sys.exit(1)
+    concurrentprocs = options.count
     c = client.Command()
     cmdstr = " ".join(args[1:])
@@ -76,26 +80,34 @@ def run():
         sys.exit(exitcode)
     rdy, _, _ = select.select(all, [], [], 10)
     while all:
+        pernodeout = {}
         for r in rdy:
-            data = r.readline()
             desc = pipedesc[r]
-            if data:
-                node = desc['node']
-                if desc['type'] == 'stdout':
-                    sys.stdout.write('{0}: {1}'.format(node,data))
-                    sys.stdout.flush()
+            node = desc['node']
+            data = True
+            while data and select.select([r], [], [], 0):
+                data = r.readline()
+                if data:
+                    if desc['type'] == 'stdout':
+                        if node not in pernodeout:
+                            pernodeout[node] = []
+                        pernodeout[node].append(data)
+                    else:
+                        sys.stderr.write('{0}: {1}'.format(node, data))
+                        sys.stderr.flush()
                 else:
-                    sys.stderr.write('{0}: {1}'.format(node, data))
-                    sys.stderr.flush()
-            else:
-                pop = desc['popen']
-                ret = pop.poll()
-                if ret is not None:
-                    exitcode = exitcode | ret
-                    all.discard(r)
-                    if desc['type'] == 'stdout' and pendingexecs:
-                        node, cmdv = pendingexecs.popleft()
-                        run_cmdv(node, cmdv, all, pipedesc)
+                    pop = desc['popen']
+                    ret = pop.poll()
+                    if ret is not None:
+                        exitcode = exitcode | ret
+                        all.discard(r)
+                        if desc['type'] == 'stdout' and pendingexecs:
+                            node, cmdv = pendingexecs.popleft()
+                            run_cmdv(node, cmdv, all, pipedesc)
+        for node in sortutil.natural_sort(pernodeout):
+            for line in pernodeout[node]:
+                sys.stdout.write('{0}: {1}'.format(node, line))
+        sys.stdout.flush()
         if all:
             rdy, _, _ = select.select(all, [], [], 10)
     sys.exit(exitcode)
confluent_client/confluent/sortutil.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2014 IBM Corporation
+# Copyright 2015-2016 Lenovo
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+numregex = re.compile('([0-9]+)')
+
+
+def naturalize_string(key):
+    """Analyzes string in a human way to enable natural sort
+
+    :param key: The node name to analyze
+    :returns: A structure that can be consumed by 'sorted'
+    """
+    return [int(text) if text.isdigit() else text.lower()
+            for text in re.split(numregex, key)]
+
+
+def natural_sort(iterable):
+    """Return a sort using natural sort if possible
+
+    :param iterable:
+    :return:
+    """
+    try:
+        return sorted(iterable, key=naturalize_string)
+    except TypeError:
+        # The natural sort attempt failed, fallback to ascii sort
+        return sorted(iterable)
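A quick illustration of what the new helper changes relative to a plain sorted() call; the node names are invented and confluent_client is assumed to be on the Python path:

import confluent.sortutil as sortutil

nodes = ['n10', 'n2', 'n1', 'compute12', 'compute3']
print(sorted(nodes))                 # ['compute12', 'compute3', 'n1', 'n10', 'n2']
print(sortutil.natural_sort(nodes))  # ['compute3', 'compute12', 'n1', 'n2', 'n10']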
@@ -446,23 +446,13 @@ class InputExpression(ConfluentMessage):
     # so that it can make it intact to the pertinent configmanager function
     def __init__(self, path, inputdata, nodes=None):
         self.nodeattribs = {}
-        nestedmode = False
         if not inputdata:
             raise exc.InvalidArgumentException('no request data provided')
         if nodes is None:
             self.attribs = inputdata
             return
         for node in nodes:
-            if node in inputdata:
-                nestedmode = True
-                self.nodeattribs[node] = inputdata[node]
-        if nestedmode:
-            for key in inputdata:
-                if key not in nodes:
-                    raise exc.InvalidArgumentException
-        else:
-            for node in nodes:
-                self.nodeattribs[node] = inputdata
+            self.nodeattribs[node] = inputdata

     def get_attributes(self, node):
         if node not in self.nodeattribs:
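As read from the removed lines, InputExpression previously special-cased a "nested" payload keyed by node name and validated that every key was a node in the range; after this change the request body is stored for every node as-is. A hypothetical illustration of the difference, not part of the commit (attribute names and values are made up):

nodes = ['n1', 'n2']

# Nested form previously recognized: per-node values keyed by node name.
nested = {'n1': {'info.note': 'rack A'}, 'n2': {'info.note': 'rack B'}}

# Form handled after the simplification: one body applied to every node.
flat = {'info.note': 'rack A'}
nodeattribs = {node: flat for node in nodes}
print(nodeattribs)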
@@ -484,7 +474,6 @@ class InputAttributes(ConfluentMessage):
     # to the client
     def __init__(self, path, inputdata, nodes=None):
         self.nodeattribs = {}
-        nestedmode = False
         if not inputdata:
             raise exc.InvalidArgumentException('no request data provided')
         if nodes is None:
@@ -509,16 +498,7 @@ class InputAttributes(ConfluentMessage):
                             'expression': self.attribs[attrib]}
             return
         for node in nodes:
-            if node in inputdata:
-                nestedmode = True
-                self.nodeattribs[node] = inputdata[node]
-        if nestedmode:
-            for key in inputdata:
-                if key not in nodes:
-                    raise exc.InvalidArgumentException
-        else:
-            for node in nodes:
-                self.nodeattribs[node] = inputdata
+            self.nodeattribs[node] = inputdata

     def get_attributes(self, node):
         if node not in self.nodeattribs:
@@ -555,7 +535,6 @@ class InputCredential(ConfluentMessage):

     def __init__(self, path, inputdata, nodes=None):
         self.credentials = {}
-        nestedmode = False
         if not inputdata:
             raise exc.InvalidArgumentException('no request data provided')

@@ -16,6 +16,7 @@
 import confluent.exceptions as exc
 import confluent.messages as msg
 import confluent.config.attributes as allattributes
+import confluent.util as util


 def retrieve(nodes, element, configmanager, inputdata):

@@ -175,8 +176,11 @@ def _expand_expression(nodes, configmanager, inputdata):
         expression = expression['expression']
     if type(expression) is dict:
         expression = expression['expression']
+    pernodeexpressions = {}
     for expanded in configmanager.expand_attrib_expression(nodes, expression):
-        yield msg.KeyValueData({'value': expanded[1]}, expanded[0])
+        pernodeexpressions[expanded[0]] = expanded[1]
+    for node in util.natural_sort(pernodeexpressions):
+        yield msg.KeyValueData({'value': pernodeexpressions[node]}, node)


 def create(nodes, element, configmanager, inputdata):