Mirror of https://github.com/xcat2/confluent.git (synced 2025-01-28 20:07:48 +00:00)

Commit a3bc17b465: Merge branch '18csi'
@@ -45,63 +45,71 @@ _confluent_get_args()
NUMARGS=${#CMPARGS[@]}
if [ "${COMP_WORDS[-1]}" == '' ]; then
NUMARGS=$((NUMARGS+1))
CMPARGS+=("")
fi
GENNED=""
for CAND in ${COMP_CANDIDATES[@]}; do
candarray=(${CAND//,/ })
matched=0
for c in "${candarray[@]}"; do
for arg in "${CMPARGS[@]}"; do
if [ "$arg" = "$c" ]; then
matched=1
break
fi
done
done
if [ 0 = $matched ]; then
for c in "${candarray[@]}"; do
GENNED+=" $c"
done
fi
done
}

_confluent_nodeidentify_completion()
function _confluent_generic_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "on off" -- ${COMP_WORDS[-1]}))
if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]}))
fi
if [ $NUMARGS -lt 3 ]; then
_confluent_nr_completion
return;
fi
}
_confluent_nodeidentify_completion()
{
COMP_CANDIDATES=("on,off -h")
_confluent_generic_completion
}


_confluent_nodesetboot_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "default cd network setup hd" -- ${COMP_WORDS[-1]}))
fi
if [ $NUMARGS -lt 3 ]; then
_confluent_nr_completion
return;
fi
COMP_CANDIDATES=("default,cd,network,setup,hd -h -b -p")
_confluent_generic_completion
}

_confluent_nodepower_completion()
{
_confluent_get_args
if [ "${CMPARGS[-1]:0:1}" == '-' ]; then
COMPREPLY=($(compgen -W "-h -p" -- ${COMP_WORDS[-1]}))
return
fi
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "boot off on status" -- ${COMP_WORDS[-1]}))
return;
fi
if [ $NUMARGS -lt 3 ]; then
_confluent_nr_completion
return;
fi
COMP_CANDIDATES=("boot,off,on,status -h -p")
_confluent_generic_completion
}

_confluent_nodemedia_completion()
{
COMP_CANDIDATES=("list,upload,attach,detachall -h")
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "list upload attach detachall" -- ${COMP_WORDS[-1]}))
return;
fi
if [ $NUMARGS -gt 3 ] && [ ${CMPARGS[2]} == 'upload' ]; then
if [ $NUMARGS -gt 3 ] && [ ${CMPARGS[-2]} == 'upload' ]; then
compopt -o default
COMPREPLY=()
return
fi
if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]}))
return;
fi
if [ $NUMARGS -lt 3 ]; then
_confluent_nr_completion
return;
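The helper above builds GENNED by dropping an entire comma-separated candidate group as soon as any member of that group already appears among the typed arguments, so mutually exclusive verbs such as on/off stop being offered once one of them has been chosen. A minimal Python sketch of that filtering rule; the function name is illustrative and not part of confluent:

    def remaining_candidates(candidate_spec, typed_args):
        # candidate_spec mirrors COMP_CANDIDATES, e.g. "on,off -h":
        # space-separated groups, each group a comma-separated set of
        # mutually exclusive words.
        suggestions = []
        for group in candidate_spec.split():
            words = group.split(',')
            if not any(word in typed_args for word in words):
                suggestions.extend(words)
        return suggestions

    print(remaining_candidates("on,off -h", ["nodeidentify", "n1"]))        # ['on', 'off', '-h']
    print(remaining_candidates("on,off -h", ["nodeidentify", "n1", "on"]))  # ['-h']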
@@ -4,12 +4,44 @@ nodeboot(8) -- Reboot a confluent node to a specific device

## SYNOPSIS

`nodeboot <noderange>`
`nodeboot <noderange>` [net|setup]
`nodeboot [options] <noderange>` [default|cd|network|setup|hd]

## DESCRIPTION

**nodeboot** reboots nodes in a noderange. If an additional argument is given,
it sets the node to specifically boot to that as the next boot.
it sets the node to specifically boot to that as the next boot. This
performs an immediate reboot without waiting for the OS. To set the boot
device without inducing a reboot, see the `nodesetboot` command.

## OPTIONS

* `-b`, `--bios`:
For a system that supports both BIOS and UEFI style boot, request BIOS style
boot if supported (some platforms will UEFI boot with this flag anyway).

* `-u`, `--uefi`:
This flag does nothing; it is for command compatibility with xCAT's rsetboot.

* `-p`, `--persist`:
For a system that supports it, mark the boot override to persist rather than
be a one time change. Many systems do not support this functionality.

* `default`:
Request a normal default boot with no particular device override.

* `cd`:
Request boot from media. Note that this can include physical CD,
remote media mounted as CD/DVD, and detachable hard disk drives such as USB
key devices.

* `network`:
Request boot to network.

* `setup`:
Request to enter the firmware configuration menu (e.g. F1 setup) on next boot.

* `hd`:
Boot straight to hard disk drive.

## EXAMPLES

* Booting n3 and n4 to the default boot behavior:
@@ -7,7 +7,7 @@ nodegroupdefine(8) -- Define new confluent node group

## DESCRIPTION

`nodegroupdefine` allows the definition of a new node for the confluent management
`nodegroupdefine` allows the definition of a new nodegroup for the confluent management
service. It may only define a single group name at a time.
It has the same syntax as `nodegroupattrib(8)`, and the commands differ in
that `nodegroupattrib(8)` will error if a node group does not exist.
@@ -30,7 +30,10 @@ control.
* `-p`, `--persist`:
For a system that supports it, mark the boot override to persist rather than
be a one time change. Many systems do not support this functionality.

* `-u`, `--uefi`:
This flag does nothing; it is for command compatibility with xCAT's rsetboot.

* `default`:
Request a normal default boot with no particular device override.

@@ -66,8 +66,9 @@ def join_collective(server, invitation):
'invitation': invitation,
'server': server}})
res = tlvdata.recv(s)
print(res.get('collective',
{'status': 'Unknown response: ' + repr(res)})['status'])
res = res.get('collective',
{'status': 'Unknown response: ' + repr(res)})
print(res.get('status', res['error']))


def show_collective():
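The reworked join_collective no longer indexes ['status'] unconditionally; it keeps the 'collective' payload and falls back to its 'error' member, so a failed join prints the server's error text instead of raising a KeyError. A small self-contained sketch of that lookup pattern; the sample replies are invented for illustration:

    def describe(reply):
        # Prefer the 'status' field of the 'collective' payload; otherwise
        # report its 'error' field, mirroring the new client behavior.
        res = reply.get('collective',
                        {'status': 'Unknown response: ' + repr(reply)})
        return res.get('status', res['error'])

    print(describe({'collective': {'status': 'Success'}}))            # Success
    print(describe({'collective': {'error': 'Invalid invitation'}}))  # Invalid invitation
    print(describe({'unexpected': True}))                             # Unknown response: ...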
@@ -35,7 +35,6 @@ except ImportError:
crypto = None

currentleader = None
cfginitlock = None
follower = None
retrythread = None

@@ -54,10 +53,7 @@ leader_init = ContextBool()

def connect_to_leader(cert=None, name=None, leader=None):
global currentleader
global cfginitlock
global follower
if cfginitlock is None:
cfginitlock = threading.RLock()
if leader is None:
leader = currentleader
log.log({'info': 'Attempting connection to leader {0}'.format(leader),
@@ -70,7 +66,7 @@ def connect_to_leader(cert=None, name=None, leader=None):
'subsystem': 'collective'})
return False
with connecting:
with cfginitlock:
with cfm._initlock:
tlvdata.recv(remote)  # the banner
tlvdata.recv(remote)  # authpassed... 0..
if name is None:
@@ -520,13 +516,10 @@ def become_leader(connection):


def startup():
global cfginitlock
members = list(cfm.list_collective())
if len(members) < 2:
# Not in collective mode, return
return
if cfginitlock is None:
cfginitlock = threading.RLock()
eventlet.spawn_n(start_collective)

def start_collective():
@@ -82,6 +82,7 @@ _dirtylock = threading.RLock()
_leaderlock = gthread.RLock()
_synclock = threading.RLock()
_rpclock = gthread.RLock()
_initlock = gthread.RLock()
_followerlocks = {}
_config_areas = ('nodegroups', 'nodes', 'usergroups', 'users')
tracelog = None
@@ -953,38 +954,39 @@ class ConfigManager(object):

def __init__(self, tenant, decrypt=False, username=None):
global _cfgstore
if _cfgstore is None:
init()
self.decrypt = decrypt
self.current_user = username
if tenant is None:
self.tenant = None
if 'main' not in _cfgstore:
_cfgstore['main'] = {}
with _initlock:
if _cfgstore is None:
init()
self.decrypt = decrypt
self.current_user = username
if tenant is None:
self.tenant = None
if 'main' not in _cfgstore:
_cfgstore['main'] = {}
self._bg_sync_to_file()
self._cfgstore = _cfgstore['main']
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {'nodes': set()}}
_mark_dirtykey('nodegroups', 'everything', self.tenant)
self._bg_sync_to_file()
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
return
elif 'tenant' not in _cfgstore:
_cfgstore['tenant'] = {tenant: {}}
self._bg_sync_to_file()
self._cfgstore = _cfgstore['main']
elif tenant not in _cfgstore['tenant']:
_cfgstore['tenant'][tenant] = {}
self._bg_sync_to_file()
self.tenant = tenant
self._cfgstore = _cfgstore['tenant'][tenant]
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {'nodes': set()}}
self._cfgstore['nodegroups'] = {'everything': {}}
_mark_dirtykey('nodegroups', 'everything', self.tenant)
self._bg_sync_to_file()
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
return
elif 'tenant' not in _cfgstore:
_cfgstore['tenant'] = {tenant: {}}
self._bg_sync_to_file()
elif tenant not in _cfgstore['tenant']:
_cfgstore['tenant'][tenant] = {}
self._bg_sync_to_file()
self.tenant = tenant
self._cfgstore = _cfgstore['tenant'][tenant]
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {}}
_mark_dirtykey('nodegroups', 'everything', self.tenant)
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()

def get_collective_member(self, name):
return get_collective_member(name)
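The __init__ rework moves the lazy _cfgstore setup under the new module-level _initlock, so two concurrent ConfigManager constructions cannot both observe an uninitialized store and race through init(). A generic, self-contained sketch of that serialize-then-check pattern, with a plain dict standing in for confluent's real init() and threading.RLock standing in for the gthread.RLock in the diff:

    import threading

    _initlock = threading.RLock()   # stand-in for gthread.RLock()
    _cfgstore = None

    def ensure_store():
        # All callers take the lock first; only the first one to arrive
        # performs the one-time setup, later callers see the finished store.
        global _cfgstore
        with _initlock:
            if _cfgstore is None:
                _cfgstore = {'main': {}}   # stand-in for the real init()
        return _cfgstore

    print(ensure_store())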
@@ -873,8 +873,16 @@ def dispatch_request(nodes, manager, element, configmanager, inputdata,
certfile='/etc/confluent/srvcert.pem')
except Exception:
for node in nodes:
yield msg.ConfluentResourceUnavailable(
node, 'Collective member {0} is unreachable'.format(a['name']))
if a:
yield msg.ConfluentResourceUnavailable(
node, 'Collective member {0} is unreachable'.format(
a['name']))
else:
yield msg.ConfluentResourceUnavailable(
node,
'"{0}" is not recognized as a collective member'.format(
manager))

return
if not util.cert_matches(a['fingerprint'], remote.getpeercert(
binary_form=True)):
@@ -106,6 +106,7 @@ class PubkeyInvalid(ConfluentException):
super(PubkeyInvalid, self).__init__(self, text)
self.fingerprint = fingerprint
self.attrname = attribname
self.message = text
bodydata = {'message': text,
'event': event,
'fingerprint': fingerprint,
@@ -171,7 +171,7 @@ def _extract_neighbor_data_b(args):

args are carried as a tuple, because of eventlet convenience
"""
switch, password, user, force = args
switch, password, user, force = args[:4]
vintage = _neighdata.get(switch, {}).get('!!vintage', 0)
now = util.monotonic_time()
if vintage > (now - 60) and not force:
@@ -220,17 +220,19 @@ def _extract_neighbor_data_b(args):
_neighdata[switch] = lldpdata


def update_switch_data(switch, configmanager, force=False):
def update_switch_data(switch, configmanager, force=False, retexc=False):
switchcreds = netutil.get_switchcreds(configmanager, (switch,))[0]
_extract_neighbor_data(switchcreds + (force,))
ndr = _extract_neighbor_data(switchcreds + (force, retexc))
if retexc and isinstance(ndr, Exception):
raise ndr
return _neighdata.get(switch, {})


def update_neighbors(configmanager, force=False):
return _update_neighbors_backend(configmanager, force)
def update_neighbors(configmanager, force=False, retexc=False):
return _update_neighbors_backend(configmanager, force, retexc)


def _update_neighbors_backend(configmanager, force):
def _update_neighbors_backend(configmanager, force, retexc):
global _neighdata
global _neighbypeerid
vintage = _neighdata.get('!!vintage', 0)
@@ -241,7 +243,7 @@ def _update_neighbors_backend(configmanager, force):
_neighbypeerid = {'!!vintage': now}
switches = netutil.list_switches(configmanager)
switchcreds = netutil.get_switchcreds(configmanager, switches)
switchcreds = [ x + (force,) for x in switchcreds]
switchcreds = [ x + (force, retexc) for x in switchcreds]
pool = GreenPool(64)
for ans in pool.imap(_extract_neighbor_data, switchcreds):
yield ans
@@ -258,9 +260,15 @@ def _extract_neighbor_data(args):
return
try:
with _updatelocks[switch]:
_extract_neighbor_data_b(args)
except Exception:
log.logtrace()
return _extract_neighbor_data_b(args)
except Exception as e:
yieldexc = False
if len(args) >= 5:
yieldexc = args[4]
if yieldexc:
return e
else:
log.logtrace()

if __name__ == '__main__':
# a quick one-shot test, args are switch and snmpv1 string for now
@@ -327,7 +335,9 @@ def _handle_neighbor_query(pathcomponents, configmanager):
# guaranteed
if (parms['by-peerid'] not in _neighbypeerid and
_neighbypeerid.get('!!vintage', 0) < util.monotonic_time() - 60):
list(update_neighbors(configmanager))
for x in update_neighbors(configmanager, retexc=True):
if isinstance(x, Exception):
raise x
if parms['by-peerid'] not in _neighbypeerid:
raise exc.NotFoundException('No matching peer known')
return _dump_neighbordatum(_neighbypeerid[parms['by-peerid']])
@@ -336,9 +346,11 @@ def _handle_neighbor_query(pathcomponents, configmanager):
if listrequested not in multi_selectors | single_selectors:
raise exc.NotFoundException('{0} is not found'.format(listrequested))
if 'by-switch' in parms:
update_switch_data(parms['by-switch'], configmanager)
update_switch_data(parms['by-switch'], configmanager, retexc=True)
else:
list(update_neighbors(configmanager))
for x in update_neighbors(configmanager, retexc=True):
if isinstance(x, Exception):
raise x
return list_info(parms, listrequested)
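With the retexc flag threaded through update_neighbors and _extract_neighbor_data, a worker failure can be handed back to the caller as the exception object itself rather than only being logged, and the HTTP-facing query loop re-raises it so the client sees the real error instead of an empty neighbor table. A stripped-down sketch of that propagation pattern; the function names and the ConnectionError are illustrative, and confluent fans the real workers out over an eventlet GreenPool:

    def fetch_one(switch, retexc=False):
        # Worker in the style of _extract_neighbor_data: on failure, either
        # return the exception to the caller (retexc) or just log and move on.
        try:
            if switch == 'bad-switch':
                raise ConnectionError('credentials rejected by ' + switch)
            return {switch: {'port1': 'node1'}}
        except Exception as e:
            if retexc:
                return e
            print('logged:', e)   # stand-in for log.logtrace()

    def fetch_all(switches, retexc=False):
        # Generator in the style of _update_neighbors_backend.
        for switch in switches:
            yield fetch_one(switch, retexc)

    try:
        for result in fetch_all(['good-switch', 'bad-switch'], retexc=True):
            if isinstance(result, Exception):
                raise result        # what _handle_neighbor_query now does
            print('neighbors:', result)
    except ConnectionError as err:
        print('surfaced to the API caller:', err)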
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2015 Lenovo
# Copyright 2015-2018 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -113,13 +113,48 @@ class SshShell(conapi.Console):
self.password = ''
self.datacallback('\r\nlogin as: ')
return
except cexc.PubkeyInvalid as pi:
self.keyaction = ''
self.candidatefprint = pi.fingerprint
self.datacallback(pi.message)
self.keyattrname = pi.attrname
self.datacallback('\r\nNew fingerprint: ' + pi.fingerprint)
self.inputmode = -1
self.datacallback('\r\nEnter "disconnect" or "accept": ')
return
self.inputmode = 2
self.connected = True
self.shell = self.ssh.invoke_shell()
self.rxthread = eventlet.spawn(self.recvdata)

def write(self, data):
if self.inputmode == 0:
if self.inputmode == -1:
while len(data) and data[0] == b'\x7f' and len(self.keyaction):
self.datacallback('\b \b')  # erase previously echoed value
self.keyaction = self.keyaction[:-1]
data = data[1:]
while len(data) and data[0] == b'\x7f':
data = data[1:]
while b'\x7f' in data:
delidx = data.index(b'\x7f')
data = data[:delidx - 1] + data[delidx + 1:]
self.keyaction += data
if '\r' in self.keyaction:
action = self.keyaction.split('\r')[0]
if action.lower() == 'accept':
self.nodeconfig.set_node_attributes(
{self.node:
{self.keyattrname: self.candidatefprint}})
self.datacallback('\r\n')
self.logon()
elif action.lower() == 'disconnect':
self.datacallback(conapi.ConsoleEvent.Disconnect)
else:
self.keyaction = ''
self.datacallback('\r\nEnter "disconnect" or "accept": ')
elif len(data) > 0:
self.datacallback(data)
elif self.inputmode == 0:
while len(data) and data[0] == b'\x7f' and len(self.username):
self.datacallback('\b \b')  # erase previously echoed value
self.username = self.username[:-1]
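The new inputmode == -1 branch collects the operator's "accept"/"disconnect" answer and honors DEL (0x7f) both against input already echoed and inside the incoming chunk, where each DEL also removes the character typed just before it. A standalone sketch of that editing rule on bytes; the function name is mine, not confluent's:

    DEL = b'\x7f'

    def apply_deletes(buffered, data):
        # Leading DELs erase characters already in the buffer, one each.
        while data[:1] == DEL and buffered:
            buffered = buffered[:-1]
            data = data[1:]
        # DELs with nothing left to erase are simply dropped.
        while data[:1] == DEL:
            data = data[1:]
        # A DEL later in the chunk removes itself and the character before it.
        while DEL in data:
            i = data.index(DEL)
            data = data[:i - 1] + data[i + 1:]
        return buffered + data

    print(apply_deletes(b'acc', b'\x7f\x7fccept'))  # b'accept'
    print(apply_deletes(b'', b'ac\x7fccept'))       # b'accept'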
@@ -92,12 +92,17 @@ class Session(object):
errstr, errnum, erridx, answers = rsp
if errstr:
errstr = str(errstr)
if errstr in ('unknownUserName', 'wrongDigest'):
raise exc.TargetEndpointBadCredentials(errstr)
finerr = errstr + ' while trying to connect to ' \
'{0}'.format(self.server)
if errstr in ('Unknown USM user', 'unknownUserName',
'wrongDigest', 'Wrong SNMP PDU digest'):
raise exc.TargetEndpointBadCredentials(finerr)
# need to do bad credential versus timeout
raise exc.TargetEndpointUnreachable(errstr)
raise exc.TargetEndpointUnreachable(finerr)
elif errnum:
raise exc.ConfluentException(errnum.prettyPrint())
raise exc.ConfluentException(errnum.prettyPrint() +
' while trying to connect to '
'{0}'.format(self.server))
for ans in answers:
if not obj[0].isPrefixOf(ans[0]):
# PySNMP returns leftovers in a bulk command
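The SNMP error handling now appends the target host to the message and treats the additional USM error strings ('Unknown USM user', 'Wrong SNMP PDU digest') as credential failures rather than unreachability. A compact sketch of that classification; the exception classes and the host name are local stand-ins, not confluent's exc module:

    class TargetEndpointBadCredentials(Exception):
        pass

    class TargetEndpointUnreachable(Exception):
        pass

    BAD_CREDENTIAL_ERRORS = ('Unknown USM user', 'unknownUserName',
                             'wrongDigest', 'Wrong SNMP PDU digest')

    def classify_snmp_error(errstr, server):
        # Tag the message with the target and map known authentication
        # failures to a bad-credentials exception, as the diff now does.
        finerr = '{0} while trying to connect to {1}'.format(errstr, server)
        if errstr in BAD_CREDENTIAL_ERRORS:
            raise TargetEndpointBadCredentials(finerr)
        raise TargetEndpointUnreachable(finerr)

    try:
        classify_snmp_error('Unknown USM user', 'switch1.example.com')
    except TargetEndpointBadCredentials as err:
        print(err)  # Unknown USM user while trying to connect to switch1.example.com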