mirror of
https://opendev.org/x/pyghmi
synced 2025-02-20 12:30:48 +00:00
Add XCC storage management
Provide entry point and port the XCC storage management from the ipmi backend. Change-Id: Id599063dd9542aecbb58bab1953831501dd14e3d
This commit is contained in:
parent
3b8a8b93c3
commit
4b204ee1c9
@ -31,6 +31,7 @@ import pyghmi.media as media
|
||||
import pyghmi.util.webclient as webclient
|
||||
from pyghmi.util.parse import parse_time
|
||||
import pyghmi.redfish.oem.lookup as oem
|
||||
# import pyghmi.storage as storage
|
||||
import re
|
||||
from dateutil import tz
|
||||
|
||||
@ -1166,6 +1167,37 @@ class Command(object):
|
||||
imageurl = vminfo['Image'].replace('/' + vminfo['ImageName'], '')
|
||||
yield media.Media(vminfo['ImageName'], imageurl)
|
||||
|
||||
def get_storage_configuration(self):
    """Get storage configuration data

    Retrieves the storage configuration from the target. Data is given
    about disks, pools, and volumes. When referencing something, use the
    relevant 'cfgpath' attribute to describe it. It is not guaranteed that
    cfgpath will be consistent version to version, so a lookup is suggested
    in end user applications.

    :return: A pyghmi.storage.ConfigSpec object describing current config
    """
    # Delegate to the OEM-specific handler; the generic handler raises
    # UnsupportedFunctionality when the platform has no implementation.
    return self.oem.get_storage_configuration()
|
||||
|
||||
def remove_storage_configuration(self, cfgspec):
    """Remove specified storage configuration from controller.

    :param cfgspec: A pyghmi.storage.ConfigSpec describing what to remove
    :return:
    """
    handler = self.oem
    return handler.remove_storage_configuration(cfgspec)
|
||||
|
||||
def apply_storage_configuration(self, cfgspec=None):
    """Evaluate a configuration for validity

    This will check if configuration is currently available and, if given,
    whether the specified cfgspec can be applied.
    :param cfgspec: A pyghmi.storage.ConfigSpec describing desired config
    :return:
    """
    # Delegate to the OEM-specific handler; the generic handler raises
    # UnsupportedFunctionality when the platform has no implementation.
    return self.oem.apply_storage_configuration(cfgspec)
|
||||
|
||||
if __name__ == '__main__':
|
||||
import os
|
||||
import sys
|
||||
|
@ -32,7 +32,7 @@ class OEMHandler(object):
|
||||
|
||||
def get_description(self):
    # Generic handler has no OEM-specific description data.
    return dict()
|
||||
|
||||
|
||||
def get_firmware_inventory(self, components):
    # Generic handler knows of no OEM firmware components.
    return list()
|
||||
|
||||
@ -40,6 +40,18 @@ class OEMHandler(object):
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def get_storage_configuration(self):
    # Generic fallback: storage management requires an OEM handler.
    errmsg = 'Remote storage configuration not supported on this platform'
    raise exc.UnsupportedFunctionality(errmsg)
|
||||
|
||||
def remove_storage_configuration(self, cfgspec):
    # Generic fallback: storage management requires an OEM handler.
    errmsg = 'Remote storage configuration not supported on this platform'
    raise exc.UnsupportedFunctionality(errmsg)
|
||||
|
||||
def apply_storage_configuration(self, cfgspec):
    # Generic fallback: storage management requires an OEM handler.
    errmsg = 'Remote storage configuration not supported on this platform'
    raise exc.UnsupportedFunctionality(errmsg)
|
||||
|
||||
def _do_web_request(self, url, payload=None, method=None, cache=True):
|
||||
res = None
|
||||
if cache and payload is None and method is None:
|
||||
|
@ -17,8 +17,10 @@ from pyghmi.util.parse import parse_time
|
||||
import errno
|
||||
import json
|
||||
import socket
|
||||
import time
|
||||
import pyghmi.ipmi.private.util as util
|
||||
import pyghmi.exceptions as pygexc
|
||||
import pyghmi.storage as storage
|
||||
|
||||
|
||||
class OEMHandler(generic.OEMHandler):
|
||||
@ -122,6 +124,263 @@ class OEMHandler(generic.OEMHandler):
|
||||
yield adpinfo
|
||||
raise pygexc.BypassGenericBehavior()
|
||||
|
||||
def get_storage_configuration(self, logout=True):
    """Fetch the current storage layout from the XCC.

    Queries the XCC 'raid_alldevices' web API and translates the
    response into pyghmi storage objects.  Every object id is a
    (controller id, device id) tuple so later operations can address
    the same device on the right controller.

    :param logout: accepted for call compatibility with sibling
        implementations; not referenced in this body — NOTE(review):
        confirm whether a web session logout was intended here
    :return: a pyghmi.storage.ConfigSpec with standalone disks and arrays
    """
    rsp = self.wc.grab_json_response(
        '/api/function/raid_alldevices?params=storage_GetAllDevices')
    standalonedisks = []
    pools = []
    for item in rsp.get('items', []):
        for cinfo in item['controllerInfo']:
            cid = cinfo['id']
            for pool in cinfo['pools']:
                volumes = []
                disks = []
                spares = []
                for volume in pool['volumes']:
                    volumes.append(
                        storage.Volume(name=volume['name'],
                                       size=volume['capacity'],
                                       status=volume['statusStr'],
                                       id=(cid, volume['id'])))
                for disk in pool['disks']:
                    diskinfo = storage.Disk(
                        name=disk['name'], description=disk['type'],
                        id=(cid, disk['id']), status=disk['RAIDState'],
                        serial=disk['serialNo'], fru=disk['fruPartNo'])
                    # Dedicated hot spares are reported alongside array
                    # members; separate them into the hotspares list.
                    if disk['RAIDState'] == 'Dedicated Hot Spare':
                        spares.append(diskinfo)
                    else:
                        disks.append(diskinfo)
                # Capacity strings look like '<number>GB'; the GB value is
                # multiplied by 1024 (presumably to get MB — confirm the
                # unit convention of storage.Array consumers).
                totalsize = pool['totalCapacityStr'].replace('GB', '')
                totalsize = int(float(totalsize) * 1024)
                freesize = pool['freeCapacityStr'].replace('GB', '')
                freesize = int(float(freesize) * 1024)
                pools.append(storage.Array(
                    disks=disks, raid=pool['rdlvlstr'], volumes=volumes,
                    id=(cid, pool['id']), hotspares=spares,
                    capacity=totalsize, available_capacity=freesize))
            for disk in cinfo.get('unconfiguredDisks', ()):
                # can be unused, global hot spare, or JBOD
                standalonedisks.append(
                    storage.Disk(
                        name=disk['name'], description=disk['type'],
                        id=(cid, disk['id']), status=disk['RAIDState'],
                        serial=disk['serialNo'], fru=disk['fruPartNo']))
    return storage.ConfigSpec(disks=standalonedisks, arrays=pools)
|
||||
|
||||
def _set_drive_state(self, disk, state):
|
||||
rsp = self.wc.grab_json_response(
|
||||
'/api/function',
|
||||
{'raidlink_DiskStateAction': '{0},{1}'.format(disk.id[1], state)})
|
||||
if rsp.get('return', -1) != 0:
|
||||
raise Exception(
|
||||
'Unexpected return to set disk state: {0}'.format(
|
||||
rsp.get('return', -1)))
|
||||
|
||||
def _make_available(self, disk, realcfg):
|
||||
# 8 if jbod, 4 if hotspare.., leave alone if already...
|
||||
currstatus = self._get_status(disk, realcfg)
|
||||
newstate = None
|
||||
if currstatus == 'Unconfigured Good':
|
||||
return
|
||||
elif currstatus.lower() == 'global hot spare':
|
||||
newstate = 4
|
||||
elif currstatus.lower() == 'jbod':
|
||||
newstate = 8
|
||||
self._set_drive_state(disk, newstate)
|
||||
|
||||
def _make_jbod(self, disk, realcfg):
|
||||
currstatus = self._get_status(disk, realcfg)
|
||||
if currstatus.lower() == 'jbod':
|
||||
return
|
||||
self._make_available(disk, realcfg)
|
||||
self._set_drive_state(disk, 16)
|
||||
|
||||
def _make_global_hotspare(self, disk, realcfg):
|
||||
currstatus = self._get_status(disk, realcfg)
|
||||
if currstatus.lower() == 'global hot spare':
|
||||
return
|
||||
self._make_available(disk, realcfg)
|
||||
self._set_drive_state(disk, 1)
|
||||
|
||||
def _get_status(self, disk, realcfg):
|
||||
for cfgdisk in realcfg.disks:
|
||||
if disk.id == cfgdisk.id:
|
||||
currstatus = cfgdisk.status
|
||||
break
|
||||
else:
|
||||
raise pygexc.InvalidParameterValue('Requested disk not found')
|
||||
return currstatus
|
||||
|
||||
def _raid_number_map(self, controller):
|
||||
themap = {}
|
||||
rsp = self.wc.grab_json_response(
|
||||
'/api/function/raid_conf?'
|
||||
'params=raidlink_GetDisksToConf,{0}'.format(controller))
|
||||
for lvl in rsp['items'][0]['supported_raidlvl']:
|
||||
mapdata = (lvl['rdlvl'], lvl['maxSpan'])
|
||||
raidname = lvl['rdlvlstr'].replace(' ', '').lower()
|
||||
themap[raidname] = mapdata
|
||||
raidname = raidname.replace('raid', 'r')
|
||||
themap[raidname] = mapdata
|
||||
raidname = raidname.replace('r', '')
|
||||
themap[raidname] = mapdata
|
||||
return themap
|
||||
|
||||
def _wait_storage_async(self):
    """Poll the XCC once per second until the async storage op leaves
    status 0 (in progress)."""
    while True:
        time.sleep(1)
        rsp = self.wc.grab_json_response(
            '/api/function/raid_conf?params=raidlink_QueryAsyncStatus')
        if rsp['items'][0]['status'] != 0:
            return
|
||||
|
||||
def _parse_array_spec(self, arrayspec):
    """Validate a requested Array spec against the XCC and normalize it.

    All member disks and hot spares must live on a single controller.
    The requested RAID level is resolved through _raid_number_map, the
    candidate layout is submitted to the XCC's
    'raidlink_CheckConfisValid' check, and on success a dict of the
    normalized parameters (capacity, controller, drive strings, level,
    span counts) is returned for use by _create_array.

    :param arrayspec: a pyghmi.storage.Array describing the new array
    :raises pygexc.UnsupportedFunctionality: disks span controllers
    :raises pygexc.InvalidParameterValue: no drives available, unknown
        RAID level, wrong disk count, or XCC rejects the layout
    :return: dict of validated parameters, or None when arrayspec has no
        disks (adding a volume to an existing array is not implemented)
    """
    controller = None
    if arrayspec.disks:
        # Every referenced drive must agree on the controller id
        # (disk.id is a (controller id, disk id) tuple).
        for disk in list(arrayspec.disks) + list(arrayspec.hotspares):
            if controller is None:
                controller = disk.id[0]
            if controller != disk.id[0]:
                raise pygexc.UnsupportedFunctionality(
                    'Cannot span arrays across controllers')
        raidmap = self._raid_number_map(controller)
        if not raidmap:
            raise pygexc.InvalidParameterValue(
                'There are no available drives for a new array')
        requestedlevel = str(arrayspec.raid).lower()
        if requestedlevel not in raidmap:
            raise pygexc.InvalidParameterValue(
                'Requested RAID "{0}" not available on this '
                'system with currently available drives'.format(
                    requestedlevel))
        rdinfo = raidmap[str(arrayspec.raid).lower()]
        rdlvl = str(rdinfo[0])
        # Spanned levels (maxSpan > 1) default to two spans.
        defspan = 1 if rdinfo[1] == 1 else 2
        spancount = defspan if arrayspec.spans is None else arrayspec.spans
        drivesperspan = str(len(arrayspec.disks) // int(spancount))
        hotspares = arrayspec.hotspares
        drives = arrayspec.disks
        # The XCC expects pipe-delimited, pipe-terminated id lists.
        if hotspares:
            hstr = '|'.join([str(x.id[1]) for x in hotspares]) + '|'
        else:
            hstr = ''
        drvstr = '|'.join([str(x.id[1]) for x in drives]) + '|'
        pth = '/api/function/raid_conf?params=raidlink_CheckConfisValid'
        args = [pth, controller, rdlvl, spancount, drivesperspan, drvstr,
                hstr]
        url = ','.join([str(x) for x in args])
        rsp = self.wc.grab_json_response(url)
        # errcode 16 appears to mean a disk-count mismatch — TODO confirm
        # against XCC API documentation.
        if rsp['items'][0]['errcode'] == 16:
            raise pygexc.InvalidParameterValue('Incorrect number of disks')
        elif rsp['items'][0]['errcode'] != 0:
            raise pygexc.InvalidParameterValue(
                'Invalid configuration: {0}'.format(
                    rsp['items'][0]['errcode']))
        return {
            'capacity': rsp['items'][0]['freeCapacity'],
            'controller': controller,
            'drives': drvstr,
            'hotspares': hstr,
            'raidlevel': rdlvl,
            'spans': spancount,
            'perspan': drivesperspan,
        }
    else:
        pass  # TODO: adding new volume to existing array would be here
|
||||
|
||||
def _create_array(self, pool):
    """Create the array and volumes described by pool on the XCC.

    The spec is first validated/normalized by _parse_array_spec, default
    volume properties are fetched from the XCC, volume sizes and names
    are resolved (auto-generated names avoid collisions with existing
    volumes), and the async 'raidlink_AddNewVolWithNaAsync' operation is
    submitted and awaited.

    :param pool: a pyghmi.storage.Array describing disks and volumes
    :raises pygexc.InvalidParameterValue: unrecognized volume size or
        requested sizes exceeding the array capacity
    :raises Exception: the XCC rejects the add-volume request
    """
    params = self._parse_array_spec(pool)
    url = '/api/function/raid_conf?params=raidlink_GetDefaultVolProp'
    args = (url, params['controller'], 0, params['drives'])
    props = self.wc.grab_json_response(','.join([str(x) for x in args]))
    props = props['items'][0]
    volumes = pool.volumes
    remainingcap = params['capacity']
    nameappend = 1
    vols = []
    currvolnames = None
    currcfg = None
    for vol in volumes:
        if vol.name is None:
            # need to iterate while there exists a volume of that name
            if currvolnames is None:
                # Lazily gather every existing volume name once.
                # (The original inner loop reused the name 'pool' here,
                # shadowing the method parameter; renamed for clarity.)
                currcfg = self.get_storage_configuration(False)
                currvolnames = set([])
                for currpool in currcfg.arrays:
                    for volume in currpool.volumes:
                        currvolnames.add(volume.name)
            name = props['name'] + '_{0}'.format(nameappend)
            nameappend += 1
            while name in currvolnames:
                name = props['name'] + '_{0}'.format(nameappend)
                nameappend += 1
        else:
            name = vol.name
        if vol.stripsize:
            # Convert to the XCC's encoded strip size (presumably log2 of
            # the size doubled — TODO confirm unit convention).
            stripsize = int(math.log(vol.stripsize * 2, 2))
        else:
            stripsize = props['stripsize']
        # Size may be a number, a percentage string, or a keyword.
        strsize = 'remainder' if vol.size is None else str(vol.size)
        if strsize in ('all', '100%'):
            volsize = params['capacity']
        elif strsize in ('remainder', 'rest'):
            volsize = remainingcap
        elif strsize.endswith('%'):
            volsize = int(params['capacity'] *
                          float(strsize.replace('%', '')) / 100.0)
        else:
            try:
                volsize = int(strsize)
            except ValueError:
                raise pygexc.InvalidParameterValue(
                    'Unrecognized size ' + strsize)
        remainingcap -= volsize
        if remainingcap < 0:
            raise pygexc.InvalidParameterValue(
                'Requested sizes exceed available capacity')
        vols.append('{0};{1};{2};{3};{4};{5};{6};{7};{8};|'.format(
            name, volsize, stripsize, props['cpwb'], props['cpra'],
            props['cpio'], props['ap'], props['dcp'], props['initstate']))
    url = '/api/function'
    arglist = '{0},{1},{2},{3},{4},{5},'.format(
        params['controller'], params['raidlevel'], params['spans'],
        params['perspan'], params['drives'], params['hotspares'])
    arglist += ''.join(vols)
    parms = {'raidlink_AddNewVolWithNaAsync': arglist}
    rsp = self.wc.grab_json_response(url, parms)
    if rsp['return'] != 0:
        raise Exception(
            'Unexpected response to add volume command: ' + repr(rsp))
    self._wait_storage_async()
|
||||
|
||||
def remove_storage_configuration(self, cfgspec):
    """Delete the requested volumes and free the requested disks.

    Volumes listed under cfgspec.arrays are removed through the async
    XCC API; afterwards every disk in cfgspec.disks is returned to an
    available state.

    :param cfgspec: a pyghmi.storage.ConfigSpec describing the removals
    :raises Exception: the XCC rejects a volume deletion
    """
    realcfg = self.get_storage_configuration(False)
    for array in cfgspec.arrays:
        for vol in array.volumes:
            # The XCC expects '<volume id>,<controller id>'.
            vid = '{0},{1}'.format(vol.id[1], vol.id[0])
            rsp = self.wc.grab_json_response(
                '/api/function', {'raidlink_RemoveVolumeAsync': vid})
            if rsp.get('return', -1) != 0:
                raise Exception(
                    'Unexpected return to volume deletion: ' + repr(rsp))
            self._wait_storage_async()
    for disk in cfgspec.disks:
        self._make_available(disk, realcfg)
|
||||
|
||||
def apply_storage_configuration(self, cfgspec):
    """Apply the requested disk states, then create requested arrays.

    Disk status strings select the transition: 'jbod' → JBOD mode,
    'hotspare' → global hot spare, and 'unconfigured'/'available'/
    'ugood'/'unconfigured good' → available.  Arrays with member disks
    are then created.

    :param cfgspec: a pyghmi.storage.ConfigSpec describing desired state
    """
    realcfg = self.get_storage_configuration(False)
    for disk in cfgspec.disks:
        wanted = disk.status.lower()
        if wanted == 'jbod':
            self._make_jbod(disk, realcfg)
        elif wanted == 'hotspare':
            self._make_global_hotspare(disk, realcfg)
        elif wanted in ('unconfigured', 'available', 'ugood',
                        'unconfigured good'):
            self._make_available(disk, realcfg)
    for array in cfgspec.arrays:
        if array.disks:
            self._create_array(array)
|
||||
|
||||
@property
|
||||
def wc(self):
|
||||
if (not self._wc or (self._wc.vintage and
|
||||
|
Loading…
x
Reference in New Issue
Block a user