Mirror of https://github.com/xcat2/confluent.git (synced 2025-04-04 17:48:35 +00:00)

Trim as-yet unused attributes.
Have RESTful access to creating nodes work.

This commit is contained in:
parent 7752bbbc27
commit fe63b1421e
@@ -52,105 +52,106 @@ nic = {
# 'node', which can be considered a 'system' or a 'vm'
node = {
    'groups': {
        'type': (list, tuple),
        'default': 'all',
        'description': ('List of static groups for which this node is'
            'considered a member'),
    },
    'type': {
        'description': ('Classification of node as system, vm, etc')
    },
    'id': {
        'description': ('Numeric identifier for node')
    },
    'location.timezone': {
        'description': 'POSIX timezone to apply to this node',
    },
    'status.summary': {
        'description': ('An assessment of the overall health of the node. It'
            'can be "optimal", "warning", "critical"'),
    },
    'status.lastheartbeat': {
        'description': 'Timestamp of last received heartbeat',
    },
    'status.heartbeatexpiry': {
        'description': 'Time when Heartbeat will be considered expired',
    },
    'status.deployment': {
        'description': 'State of any deployment activity in progress',
    },
    'status.faultdetails': {
        'description': 'Detailed problem data, if any',
    },
    'network.gateway': {
        'description': 'Default gateway to configure node with',
    },
    'network.nameservers': {
        'description': '''DNS servers for node to use''',
    },
    'network.domain': {
        'description': 'Value to append to nodename, if any, to get FQDN',
    },
    'network.interfaces': {
        'dictof': 'nic',
        'description': ('Dict of network interfaces to configure on node. '
            'Keyed on hardware address.'),
    },
    'storage.osvolume': {
        'default': 'auto',
        'description': 'Description of storage to target when deploying OS',
    },
    'storage.clientiqn': {
        'description': ('Indicates IQN used by this node when communicating'
            'with iSCSI servers'),
    },
    'storage.iscsiserver': {
        'description': 'Address of iSCSI server used for boot if applicable',
    },
    'storage.pool': {
        'description': ('For scenarios like SAN boot and virtualization, this'
            'describes the pool to allocate boot volume from'),
    },
    'os.imagename': {
        'description': 'The OS Image applied or to be applied to node',
    },
    'console.speed': {
        'default': 'auto',
        'description': ('Indicate the speed at which to run serial port.'
            'Default behavior is to autodetect the appropriate'
            'value as possible')
    },
    'console.port': {
        'default': 'auto',
        'description': ('Indicate which port to use for text console. Default'
            'behavior is to auto detect the value appropriate for'
            'the platform. "Disable" can be used to suppress'
            'serial console configuration')
    },
    #'type': {
    # 'description': ('Classification of node as system, vm, etc')
    #},
    #'id': {
    # 'description': ('Numeric identifier for node')
    #},
    # 'location.timezone': {
    # 'description': 'POSIX timezone to apply to this node',
    # },
    # 'status.summary': {
    # 'description': ('An assessment of the overall health of the node. It'
    # 'can be "optimal", "warning", "critical"'),
    # },
    # 'status.lastheartbeat': {
    # 'description': 'Timestamp of last received heartbeat',
    # },
    # 'status.heartbeatexpiry': {
    # 'description': 'Time when Heartbeat will be considered expired',
    # },
    # 'status.deployment': {
    # 'description': 'State of any deployment activity in progress',
    # },
    # 'status.faultdetails': {
    # 'description': 'Detailed problem data, if any',
    # },
    # 'network.gateway': {
    # 'description': 'Default gateway to configure node with',
    # },
    # 'network.nameservers': {
    # 'description': '''DNS servers for node to use''',
    # },
    # 'network.domain': {
    # 'description': 'Value to append to nodename, if any, to get FQDN',
    # },
    # 'network.interfaces': {
    # 'dictof': 'nic',
    # 'description': ('Dict of network interfaces to configure on node. '
    # 'Keyed on hardware address.'),
    # },
    # 'storage.osvolume': {
    # 'default': 'auto',
    # 'description': 'Description of storage to target when deploying OS',
    # },
    # 'storage.clientiqn': {
    # 'description': ('Indicates IQN used by this node when communicating'
    # 'with iSCSI servers'),
    # },
    # 'storage.iscsiserver': {
    # 'description': 'Address of iSCSI server used for boot if applicable',
    # },
    # 'storage.pool': {
    # 'description': ('For scenarios like SAN boot and virtualization, this'
    # 'describes the pool to allocate boot volume from'),
    # },
    # 'os.imagename': {
    # 'description': 'The OS Image applied or to be applied to node',
    # },
    # 'console.speed': {
    # 'default': 'auto',
    # 'description': ('Indicate the speed at which to run serial port.'
    # 'Default behavior is to autodetect the appropriate'
    # 'value as possible')
    # },
    # 'console.port': {
    # 'default': 'auto',
    # 'description': ('Indicate which port to use for text console. Default'
    # 'behavior is to auto detect the value appropriate for'
    # 'the platform. "Disable" can be used to suppress'
    # 'serial console configuration')
    # },
    'console.method': {
        'description': ('Indicate the method used to access the console of'
            'The managed node.')
    },
    'virtualization.host': {
        'description': ('Hypervisor where this node does/should reside'),
        'appliesto': ['vm'],
    },
    'virtualization.computepool': {
        'description': ('Set of compute resources this node is permitted to'
            ' be created on/be migrated to'),
        'appliesto': ['vm'],
    },
    'virtualization.storagemodel': {
        'description': ('The model of storage adapter to emulate in a virtual'
            'machine. Defaults to virtio-blk for KVM, vmscsi for'
            'VMware'),
        'appliesto': ['vm'],
    },
    'virtualization.nicmodel': {
        'description': ('The model of NIC adapter to emulate in a virtual'
            'machine. Defaults to virtio-net for KVM, vmxnet3 for'
            'VMware'),
        'appliesto': ['vm'],
    },
    # 'virtualization.host': {
    # 'description': ('Hypervisor where this node does/should reside'),
    # 'appliesto': ['vm'],
    # },
    # 'virtualization.computepool': {
    # 'description': ('Set of compute resources this node is permitted to'
    # ' be created on/be migrated to'),
    # 'appliesto': ['vm'],
    # },
    # 'virtualization.storagemodel': {
    # 'description': ('The model of storage adapter to emulate in a virtual'
    # 'machine. Defaults to virtio-blk for KVM, vmscsi for'
    # 'VMware'),
    # 'appliesto': ['vm'],
    # },
    # 'virtualization.nicmodel': {
    # 'description': ('The model of NIC adapter to emulate in a virtual'
    # 'machine. Defaults to virtio-net for KVM, vmxnet3 for'
    # 'VMware'),
    # 'appliesto': ['vm'],
    # },
    'hardwaremanagement.manager': {
        'description': 'The management address dedicated to this node',
    },
@@ -158,56 +159,56 @@ node = {
        'description': 'The method used to perform operations such as power '
            'control, get sensor data, get inventory, and so on. '
    },
    'enclosure.manager': {
        'description': "The management device for this node's chassis",
        'appliesto': ['system'],
    },
    'enclosure.bay': {
        'description': 'The bay in the enclosure, if any',
        'appliesto': ['system'],
    },
    'enclosure.type': {
        'description': '''The type of enclosure in use (e.g. IBM BladeCenter,
IBM Flex)''',
        'appliesto': ['system'],
    },
    'inventory.serialnumber': {
        'description': 'The manufacturer serial number of node',
    },
    'inventory.uuid': {
        'description': 'The UUID of the node as presented in DMI',
    },
    'inventory.modelnumber': {
        'description': 'The manufacturer dictated model number for the node',
    },
    'inventory.snmpengineid': {
        'description': 'The SNMP Engine id used by this node',
    },
    'secret.snmpuser': {
        'description': 'The user to use for SNMPv3 access to this node',
    },
    'secret.snmppassphrase': {
        'description': 'The passphrase to use for SNMPv3 access to this node',
    },
    'secret.snmplocalizedkey': {
        'description': ("SNMPv3 key localized to this node's SNMP Engine id"
            'This can be used in lieu of snmppassphrase to avoid'
            'retaining the passphrase TODO: document procedure'
            'to commit passphrase to localized key'),
    },
    'secret.snmpcommunity': {
        'description': ('SNMPv1 community string, it is highly recommended to'
            'step up to SNMPv3'),
    },
    'secret.localadminpassphrase': {
        'description': ('The passphrase to apply to local root/administrator '
            'account. '
            'If the environment is 100% Linux, the value may be '
            'one-way crypted as in /etc/shadow. For Windows, if '
            'the value is not set or is one-way crypted, the '
            'local '
            'Administrator account will be disabled, requiring AD')
    },
    # 'enclosure.manager': {
    # 'description': "The management device for this node's chassis",
    # 'appliesto': ['system'],
    # },
    # 'enclosure.bay': {
    # 'description': 'The bay in the enclosure, if any',
    # 'appliesto': ['system'],
    # },
    # 'enclosure.type': {
    # 'description': '''The type of enclosure in use (e.g. IBM BladeCenter,
    #IBM Flex)''',
    # 'appliesto': ['system'],
    # },
    # 'inventory.serialnumber': {
    # 'description': 'The manufacturer serial number of node',
    # },
    # 'inventory.uuid': {
    # 'description': 'The UUID of the node as presented in DMI',
    # },
    # 'inventory.modelnumber': {
    # 'description': 'The manufacturer dictated model number for the node',
    # },
    # 'inventory.snmpengineid': {
    # 'description': 'The SNMP Engine id used by this node',
    # },
    # 'secret.snmpuser': {
    # 'description': 'The user to use for SNMPv3 access to this node',
    # },
    # 'secret.snmppassphrase': {
    # 'description': 'The passphrase to use for SNMPv3 access to this node',
    # },
    # 'secret.snmplocalizedkey': {
    # 'description': ("SNMPv3 key localized to this node's SNMP Engine id"
    # 'This can be used in lieu of snmppassphrase to avoid'
    # 'retaining the passphrase TODO: document procedure'
    # 'to commit passphrase to localized key'),
    # },
    # 'secret.snmpcommunity': {
    # 'description': ('SNMPv1 community string, it is highly recommended to'
    # 'step up to SNMPv3'),
    # },
    # 'secret.localadminpassphrase': {
    # 'description': ('The passphrase to apply to local root/administrator '
    # 'account. '
    # 'If the environment is 100% Linux, the value may be '
    # 'one-way crypted as in /etc/shadow. For Windows, if '
    # 'the value is not set or is one-way crypted, the '
    # 'local '
    # 'Administrator account will be disabled, requiring AD')
    # },
    'secret.ipmikg': {
        'description': 'Optional Integrity key for IPMI communication'
    },
@@ -224,19 +225,19 @@ IBM Flex)''',
            'to connect over the network and value is not set, '
            'PASSW0RD is attempted')
    },
    'secret.managementuser': {
        'description': ('Username to be set and used by protocols like SSH and '
            'HTTP where client provides passphrase over the network.'
            'Given the distinct security models betwen this class '
            'of protocols and SNMP and IPMI, snmp and ipmi utilize '
            'dedicated values.'),
    },
    'secret.managementpassphrase': {
        'description': ('Passphrase to be set and used by protocols like SSH '
            'and HTTP, where client sends passphrase over the '
            'network. Given distinct security models between '
            'this class of protocols, SNMP, and IPMI, SNMP and '
            'IPMI are given their own settings with distinct '
            'behaviors'),
    },
    # 'secret.managementuser': {
    # 'description': ('Username to be set and used by protocols like SSH and '
    # 'HTTP where client provides passphrase over the network.'
    # 'Given the distinct security models betwen this class '
    # 'of protocols and SNMP and IPMI, snmp and ipmi utilize '
    # 'dedicated values.'),
    # },
    # 'secret.managementpassphrase': {
    # 'description': ('Passphrase to be set and used by protocols like SSH '
    # 'and HTTP, where client sends passphrase over the '
    # 'network. Given distinct security models between '
    # 'this class of protocols, SNMP, and IPMI, SNMP and '
    # 'IPMI are given their own settings with distinct '
    # 'behaviors'),
    # },
}
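Note: the new node-creation form in httpapi (further down in this commit) is driven directly off this schema dict. A minimal sketch, not part of the commit, of walking a dict shaped like `node` above; only the 'description' and 'default' keys are assumed, and the attribute set is whatever remains uncommented:

# Illustrative helper: list each schema attribute with its description/default.
def summarize_schema(schema):
    for attrname in sorted(schema):
        info = schema[attrname]
        desc = info.get('description', '')
        default = info.get('default', None)
        if default is not None:
            print('%s (default %s): %s' % (attrname, default, desc))
        else:
            print('%s: %s' % (attrname, desc))

# e.g. summarize_schema(node) would list console.method, groups, and so on.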
@@ -554,6 +554,15 @@ class ConfigManager(object):
                nodecfg = self._cfgstore['nodes'][node]
                self._do_inheritance(nodecfg, attr, group)

    def del_nodes(self, nodes):
        if 'nodes' not in self._cfgstore:
            return
        for node in nodes:
            if node in self._cfgstore['nodes']:
                self._sync_groups_to_node(node=node, groups=[])
                del self._cfgstore['nodes'][node]
        self._bg_sync_to_file()

    def set_node_attributes(self, attribmap):
        if 'nodes' not in self._cfgstore:
            self._cfgstore['nodes'] = {}
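A rough usage sketch of the two ConfigManager calls in this hunk; the attribute names are examples and obtaining the ConfigManager instance is outside this diff:

cfgmgr = ...  # an existing ConfigManager instance (construction not shown here)
cfgmgr.set_node_attributes({
    'n1': {'console.method': 'ipmi', 'hardwaremanagement.manager': '10.0.0.1'},
})
cfgmgr.del_nodes(['n1'])  # drops n1 after syncing its group membership to []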
@@ -6,6 +6,7 @@
import base64
import Cookie
import confluent.auth as auth
import confluent.config.attributes as attribs
import confluent.consoleserver as consoleserver
import confluent.exceptions as exc
import confluent.messages
@@ -31,6 +32,22 @@ opmap = {
    'DELETE': 'delete',
}


def node_creation_resources():
    yield confluent.messages.Attributes(
        kv={ 'name': None}, desc="Name of the node").html() + '<br>'
    for attr in sorted(attribs.node.iterkeys()):
        if attr.startswith("secret."):
            yield confluent.messages.CryptedAttributes(
                kv={ attr: None }, desc=attribs.node[attr]['description']).html() + \
                '<br>'
        else:
            yield confluent.messages.Attributes(
                kv={ attr: None }, desc=attribs.node[attr]['description']).html() + \
                '<br>'

create_resource_functions = {
    '/node/': node_creation_resources,
}


def _sessioncleaner():
    while (1):
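For context, a hedged sketch of consuming the registry above; the enclosing <form> markup is emitted by _assemble_html further down in this commit:

# Drive the generator registered for the /node/ collection.
for fragment in create_resource_functions['/node/']():
    print fragment   # one labeled <input> row per node attribute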
@@ -149,13 +166,18 @@ def _assign_consessionid(consolesession):
def resourcehandler(env, start_response):
    """Function to handle new wsgi requests
    """
    authorized = _authorize_request(env)
    mimetype = _pick_mimetype(env)
    reqbody = None
    reqtype = None
    if 'CONTENT_LENGTH' in env and int(env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
        reqtype = env['CONTENT_TYPE']
    operation = opmap[env['REQUEST_METHOD']]
    querydict = _get_query_dict(env, reqbody, reqtype)
    if 'restexplorerop' in querydict:
        operation = querydict['restexplorerop']
        del querydict['restexplorerop']
    authorized = _authorize_request(env)
    if authorized['code'] == 401:
        start_response('401 Authentication Required',
                       [('Content-type', 'text/plain'),
@@ -174,11 +196,6 @@ def resourcehandler(env, start_response):
        headers.extend(("Set-Cookie", m.OutputString())
                       for m in authorized['cookie'].values())
    cfgmgr = authorized['cfgmgr']
    operation = opmap[env['REQUEST_METHOD']]
    querydict = _get_query_dict(env, reqbody, reqtype)
    if 'restexplorerop' in querydict:
        operation = querydict['restexplorerop']
        del querydict['restexplorerop']
    if '/console/session' in env['PATH_INFO']:
        #hard bake JSON into this path, do not support other incarnations
        prefix, _, _ = env['PATH_INFO'].partition('/console/session')
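The reordering above makes the posted form data available before the operation is chosen, so the explorer's 'restexplorerop' field can override the verb-derived operation. An illustrative, self-contained sketch of that override; the opmap stand-in below is an assumption, since only the DELETE entry is visible in this diff:

# Hypothetical stand-in for opmap; only 'DELETE': 'delete' appears in the hunk.
opmap = {'GET': 'retrieve', 'PUT': 'create', 'POST': 'update', 'DELETE': 'delete'}
env = {'REQUEST_METHOD': 'POST'}
querydict = {'restexplorerop': 'create', 'name': 'n1', 'console.method': 'ipmi'}
operation = opmap[env['REQUEST_METHOD']]
if 'restexplorerop' in querydict:
    operation = querydict['restexplorerop']
    del querydict['restexplorerop']
# operation == 'create'; querydict now carries only the new node's fields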
@@ -235,46 +252,60 @@ def resourcehandler(env, start_response):
        start_response('400 Bad Request', headers)
        yield '400 - Bad Request'
        return
    start_response('200 OK', headers)
    pagecontent = ""
    if mimetype == 'text/html':
        for datum in _assemble_html(hdlr, resource, querydict, url):
            yield datum
            pagecontent += datum
    else:
        for datum in _assemble_json(hdlr, resource, url):
            yield datum
            pagecontent += datum
    start_response('200 OK', headers)
    yield pagecontent


def _assemble_html(responses, resource, querydict, url):
    yield '<html><head><title>'
    yield 'Confluent REST Explorer: ' + resource + '</title></head>'
    yield '<body><form action="' + resource + '" method="post">'
    yield '<html><head><title>' \
          'Confluent REST Explorer: ' + url + '</title></head>' \
          '<body><form action="' + resource + '" method="post">'
    if querydict:
        yield 'Response to input data:<br>'
        yield json.dumps(querydict, separators=(',', ': '),
                         indent=4, sort_keys=True)
        yield '<hr>'
    yield 'Only fields that have their boxes checked will have their '
    yield 'respective values honored by the confluent server.<hr>'
    yield '<input type="hidden" name="restexplorerop" value="update">'
    yield '<input type="hidden" name="restexplorerhonorkey" value="">'
    yield '<a rel="self" href="%s">%s</a><br>' % (resource, resource)
        yield 'Response to input data:<br>' + \
              json.dumps(querydict, separators=(',', ': '),
                         indent=4, sort_keys=True) + '<hr>'
    yield 'Only fields that have their boxes checked will have their ' \
          'respective values honored by the confluent server.<hr>' \
          '<input type="hidden" name="restexplorerhonorkey" value="">' + \
          '<a rel="self" href="%s">%s</a><br>' % (resource, resource)
    if url == '/':
        pass
        iscollection = True
    elif resource[-1] == '/':
        iscollection = True
        yield '<a rel="collection" href="../">../</a><br>'

    else:
        iscollection = False
        yield '<a rel="collection" href="./">./</a><br>'
    pendingrsp = []
    for rsp in responses:
        if isinstance(rsp, confluent.messages.LinkRelation):
            yield rsp.html()
            yield "<br>"
            yield rsp.html() + "<br>"
        else:
            pendingrsp.append(rsp)
    for rsp in pendingrsp:
        yield rsp.html()
        yield "<br>"
    yield '<input value="PUT" type="submit"></form></body></html>'
        yield rsp.html()+ "<br>"
    if iscollection:
        localpath = url[:-2]
        try:
            firstpass = True
            for y in create_resource_functions[url]():
                if firstpass:
                    yield "<hr>Define new %s:<BR>" % url.split("/")[-2]
                    firstpass = False
                yield y
            yield '<input value="create" name="restexplorerop" type="submit"></form></body></html>'
        except KeyError:
            pass
    else:
        yield '<input value="update" name="restexplorerop" type="submit"></form></body></html>'
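A minimal sketch of materializing the generator above into a page; hdlr is a stand-in for the message iterator that handle_path() returns for the requested resource:

def render_page(hdlr, resource, querydict, url):
    # Join the streamed fragments exactly as resourcehandler buffers them.
    return ''.join(_assemble_html(hdlr, resource, querydict, url))

# For a collection URL such as '/node/', the page now ends with the
# "Define new ..." fields and a create button instead of the update button.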
def _assemble_json(responses, resource, url):
@@ -285,55 +316,38 @@ def _assemble_json(responses, resource, url):
    #once and hold on to all the data in memory
    docomma = False
    links = {
        'self': ['{"href":"%s"}' % resource],
        'self': {"href":resource},
    }
    if url == '/':
        pass
    elif resource[-1] == '/':
        links['collection'] = ['{"href":"%s"}' % '../']
        links['collection'] = {"href":"../"}
    else:
        links['collection'] = ['{"href":"%s"}' % './']
    yield '{'
    hadrsp = False
        links['collection'] = {"href":"./"}
    rspdata = {}
    for rsp in responses:
        if isinstance(rsp, confluent.messages.LinkRelation):
            haldata = rsp.json_hal()
            haldata = rsp.raw_rel()
            for hk in haldata.iterkeys():
                if hk in links:
                    links[hk].append(haldata[hk])
                    if isinstance(links[hk], list):
                        links[hk].append(haldata[hk])
                    else:
                        links[hk] = [ links[hk], haldata[hk] ]
                else:
                    links[hk] = [haldata[hk]]
            continue
        hadrsp = True
        if docomma:
            yield ','
                    links[hk] = haldata[hk]
        else:
            docomma = True
        yield rsp.json()
    docomma = False
    if hadrsp:
        yield ','
    yield '"_links": {'
    groupcomma = False
    for link in links.iterkeys():
        if groupcomma:
            yield ','
        else:
            groupcomma = True
        yield json.dumps(link) + ":"
        if len(links[link]) == 1:
            yield links[link][0]
        else:
            yield '['
            for lk in links[link]:
                if docomma:
                    yield ','
            rsp = rsp.rawdata()
            for dk in rsp.iterkeys():
                if dk in rspdata:
                    if isinstance(rspdata[dk], list):
                        rspdata[dk].append(rsp[dk])
                    else:
                        rspdata[dk] = [ rspdata[dk], rsp[dk] ]
                else:
                    docomma = True
                    yield lk
            yield ']'
        yield '}'
    yield '}'
                    rspdata[dk] = rsp[dk]
    rspdata["_links"] = links
    yield json.dumps(rspdata, sort_keys=True, indent=4)


def serve():
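The rewritten _assemble_json builds one plain Python dict (via rawdata()/raw_rel()) and serializes it in a single json.dumps() instead of hand-emitting JSON fragments. A hedged illustration of the resulting shape; the attribute name and hrefs below are made up, not taken from a real response:

import json

rspdata = {
    'console.method': {'value': 'ipmi'},
    '_links': {
        'self': {'href': '/node/n1/attributes/current'},
        'collection': {'href': './'},
    },
}
print json.dumps(rspdata, sort_keys=True, indent=4)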
@@ -18,6 +18,12 @@ class ConfluentMessage(object):
        jsonsnippet = json.dumps(self.kvpairs, separators=(',', ':'))[1:-1]
        return jsonsnippet

    def rawdata(self):
        """Return pythonic representation of the response.

        Used by httpapi while assembling data prior to json serialization"""
        return self.kvpairs

    def strip_node(self, node):
        self.kvpairs = self.kvpairs[node]

@@ -29,29 +35,38 @@ class ConfluentMessage(object):
        value = self.defaultvalue
        note = ''
        type = self.defaulttype
        try:
            desc = self.desc
        except:
            desc = ''
        if 'value' in val:
            value = val['value']
            if value is None:
                value = ''
        if 'note' in val:
            note = '(' + val['note'] + ')'
        if isinstance(val, list):
            snippet += key + ":"
            for v in val:
                snippet += \
                    '<input type="%s" name="%s" value="%s">%s' % (
                        type, key, v, note)
                    '<input type="%s" name="%s" value="%s" title="%s">%s' % (
                        type, key, v, desc, note)
            snippet += \
                '<input type="%s" name="%s" value="">%s' % (
                    type, key, note)
                '<input type="%s" name="%s" value="" title="%s">%s' % (
                    type, key, desc, note)
            snippet += '<input type="checkbox" name="restexplorerhonorkey" '
            snippet += 'value="%s">' % (key)
            return snippet
        snippet += key + ":" + \
            '<input type="%s" name="%s" value="%s">%s' % (
                type, key, value, note)
            '<input type="%s" name="%s" value="%s" title="%s">%s' % (
                type, key, value, desc, note)
        snippet += '<input type="checkbox" name="restexplorerhonorkey" '
        snippet += 'value="%s">' % (key)
        return snippet


class DeletedResource(ConfluentMessage):
    def __init__(self):
        self.kvpairs = {}


class ConfluentChoiceMessage(ConfluentMessage):
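A small sketch, grounded in rawdata()/strip_node() above and the Attributes class defined further down in this file, of the new contract httpapi relies on; the node and attribute names are examples:

msg = Attributes(node='n1', kv={'console.method': 'ipmi'})
print msg.rawdata()    # {'n1': {'console.method': {'value': 'ipmi'}}}
msg.strip_node('n1')
print msg.rawdata()    # {'console.method': {'value': 'ipmi'}}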
@@ -74,17 +89,39 @@ class ConfluentChoiceMessage(ConfluentMessage):

class LinkRelation(ConfluentMessage):
    def json_hal(self):
        """Provide json_hal style representation of the relation.

        This currently only makes sense for the socket api.
        """
        return {self.rel: '{ "href": "%s" }' % self.href }

    def raw_rel(self):
        """Provide python structure of the relation.

        This currently is only sensible to consume from httpapi.
        """
        return { self.rel: { "href": self.href }}

    def html(self):
        """Provide an html representation of the link relation.

        This is used by the API explorer aspect of httpapi"""
        return '<a href="%s" rel="%s">%s</a>' % (self.href, self.rel, self.href)
        #return '<a href="%s" rel="%s">%s</a><input type="submit" name="restexprerorop" value="delete:%s"' % (self.href, self.rel, self.href, self.href)


class ChildCollection(LinkRelation):
    def __init__(self, collname):
    def __init__(self, collname, candelete=False):
        self.rel = 'item'
        self.href = collname
        self.candelete = candelete

    def html(self):
        if self.candelete:
            return '<a href="%s" rel="%s">%s</a> . . . . . . . . . . . . . . . . . . <button type="submit" name="restexplorerop" value="delete" formaction="%s">delete</button>' % (self.href, self.rel, self.href, self.href)
        else:
            return '<a href="%s" rel="%s">%s</a>' % (self.href, self.rel, self.href)

def get_input_message(path, operation, inputdata, nodes=None):
    if path[0] == 'power' and path[1] == 'state' and operation != 'retrieve':
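A quick sketch of the new link helpers in this hunk; the child name 'n1/' is illustrative:

child = ChildCollection('n1/', candelete=True)
print child.raw_rel()   # {'item': {'href': 'n1/'}}
print child.html()      # the link plus a delete button whose formaction is 'n1/'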
@@ -167,7 +204,7 @@ class BootDevice(ConfluentChoiceMessage):

    def __init__(self, node, device):
        if device not in self.valid_values:
            raise Exception("Invalid boot device argument passed in")
            raise Exception("Invalid boot device argument passed in: %s" % device)
        self.kvpairs = {
            node: {
                'bootdevice': { 'value': device },
@@ -216,13 +253,17 @@ class PowerState(ConfluentChoiceMessage):
    }

class Attributes(ConfluentMessage):
    def __init__(self, node, kv):
    def __init__(self, node=None, kv=None, desc=None):
        self.desc = desc
        nkv = {}
        for key in kv.iterkeys():
            nkv[key] = { 'value': kv[key] }
        self.kvpairs = {
            node: nkv
        }
        if node is None:
            self.kvpairs = nkv
        else:
            self.kvpairs = {
                node: nkv
            }

class ListAttributes(ConfluentMessage):
    def __init__(self, node, kv):
@@ -233,11 +274,15 @@ class ListAttributes(ConfluentMessage):
class CryptedAttributes(Attributes):
    defaulttype = 'password'

    def __init__(self, node, kv):
    def __init__(self, node=None, kv=None, desc=None):
        # for now, just keep the dictionary keys and discard crypt value
        self.desc = desc
        nkv = {}
        for key in kv.iterkeys():
            nkv[key] = { 'note': 'Encrypted' }
        self.kvpairs = {
            node: nkv
        }
        if node is None:
            self.kvpairs = nkv
        else:
            self.kvpairs = {
                node: nkv
            }
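A sketch of the node=None path added above, which is what the creation-form fields in httpapi rely on; the attribute name and description text are examples:

field = Attributes(kv={'console.method': None},
                   desc='Method used to access the console of the managed node')
print field.rawdata()   # {'console.method': {'value': None}}
print field.html()      # an <input> whose title attribute carries the description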
@@ -97,7 +97,7 @@ def iterate_collections(iterable):
    for coll in iterable:
        if coll[-1] != '/':
            coll = coll + '/'
        yield msg.ChildCollection(coll)
        yield msg.ChildCollection(coll, candelete=True)

def iterate_resources(fancydict):
    for resource in fancydict.iterkeys():
@@ -107,6 +107,14 @@ def iterate_resources(fancydict):
            resource += '/'
        yield msg.ChildCollection(resource)

def delete_node_collection(collectionpath, configmanager):
    if len(collectionpath) == 2: #just node
        node = collectionpath[-1]
        configmanager.del_nodes([node])
        yield msg.DeletedResource()
    else:
        raise Exception("Not implemented")

def enumerate_node_collection(collectionpath, configmanager):
    if collectionpath == [ 'node' ]: #it is simple '/node/', need a list of nodes
        return iterate_collections(configmanager.get_nodes())
@@ -115,10 +123,21 @@ def enumerate_node_collection(collectionpath, configmanager):
    return iterate_resources(collection)


def create_node(inputdata, configmanager):
    try:
        nodename = inputdata['name']
        del inputdata['name']
        attribmap = { nodename: inputdata }
    except KeyError:
        raise exc.InvalidArgumentException()
    configmanager.set_node_attributes(attribmap)


def enumerate_collections(collections):
    for collection in collections:
        yield msg.ChildCollection(collection)


def handle_path(path, operation, configmanager, inputdata=None):
    '''Given a full path request, return an object.

@@ -144,9 +163,19 @@ def handle_path(path, operation, configmanager, inputdata=None):
    try:
        node = pathcomponents[1]
    except IndexError: # doesn't actually have a long enough path
        # this is enumerating a list of nodes
        if operation == "delete":
            raise exc.InvalidArgumentException()
        if operation == "create":
            create_node(inputdata, configmanager)
        return iterate_collections(configmanager.get_nodes())
    if iscollection:
        return enumerate_node_collection(pathcomponents, configmanager)
        if operation == "delete":
            return delete_node_collection(pathcomponents, configmanager)
        elif operation == "retrieve":
            return enumerate_node_collection(pathcomponents, configmanager)
        else:
            raise Exception("TODO here")
    del pathcomponents[0:2]
    try:
        plugroute = nested_lookup(noderesources, pathcomponents).routeinfo
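A hedged end-to-end sketch of the new create and delete paths through handle_path; it assumes cfgmgr is a ConfigManager instance and that the URL is split into ['node', ...] components as elsewhere in this module, and the node name and attribute are examples:

for rsp in handle_path('/node/', 'create', cfgmgr,
                       inputdata={'name': 'n1', 'console.method': 'ipmi'}):
    print rsp.html()     # the refreshed node collection listing
for rsp in handle_path('/node/n1/', 'delete', cfgmgr):
    print rsp            # a DeletedResource message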
@@ -251,6 +251,9 @@ class IpmiHandler(object):
    def handle_request(self):
        while not self.loggedin:
            wait_on_ipmi()
        bootdevices = {
            'optical': 'cd'
        }
        if self.element == [ 'power', 'state' ]:
            if 'read' == self.op:
                power = self.call_ipmicmd(self.ipmicmd.get_power)
@@ -266,12 +269,15 @@ class IpmiHandler(object):
        elif self.element == [ 'boot', 'device' ]:
            if 'read' == self.op:
                bootdev = self.call_ipmicmd(self.ipmicmd.get_bootdev)
                print repr(bootdev)
                if bootdev['bootdev'] in bootdevices:
                    bootdev['bootdev'] = bootdevices[bootdev['bootdev']]
                return msg.BootDevice(node=self.node,
                                      device=bootdev['bootdev'])
            elif 'update' == self.op:
                bootdev = self.inputdata.bootdevice(self.node)
                bootdev = self.call_ipmicmd(self.ipmicmd.set_bootdev, bootdev)
                if bootdev['bootdev'] in bootdevices:
                    bootdev['bootdev'] = bootdevices[bootdev['bootdev']]
                return msg.BootDevice(node=self.node,
                                      device=bootdev['bootdev'])
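The boot-device normalization above, shown in isolation; only the 'optical' to 'cd' entry appears in this diff:

bootdevices = {'optical': 'cd'}
bootdev = {'bootdev': 'optical'}    # e.g. what get_bootdev might report
if bootdev['bootdev'] in bootdevices:
    bootdev['bootdev'] = bootdevices[bootdev['bootdev']]
assert bootdev['bootdev'] == 'cd'   # value now in the form BootDevice expects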