Mirror of https://github.com/xcat2/confluent.git, synced 2025-04-14 09:12:34 +00:00
Revert "Add MegaRAC discovery support for recent MegaRAC"
This reverts commit 07005d83ca09784b47903fb44f34d02aca48ec6e. Premature addition to master branch
This commit is contained in:
parent
07005d83ca
commit
9d979256eb
@@ -74,7 +74,6 @@ import confluent.discovery.handlers.tsm as tsm
 import confluent.discovery.handlers.pxe as pxeh
 import confluent.discovery.handlers.smm as smm
 import confluent.discovery.handlers.xcc as xcc
-import confluent.discovery.handlers.megarac as megarac
 import confluent.exceptions as exc
 import confluent.log as log
 import confluent.messages as msg
@@ -114,7 +113,6 @@ nodehandlers = {
     'service:lenovo-smm': smm,
     'service:lenovo-smm2': smm,
     'lenovo-xcc': xcc,
-    'megarac-bmc': megarac,
     'service:management-hardware.IBM:integrated-management-module2': imm,
     'pxe-client': pxeh,
     'onie-switch': None,
@@ -134,7 +132,6 @@ servicenames = {
     'service:lenovo-smm2': 'lenovo-smm2',
     'affluent-switch': 'affluent-switch',
     'lenovo-xcc': 'lenovo-xcc',
-    'megarac-bmc': 'megarac-bmc',
     #'openbmc': 'openbmc',
     'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
     'service:io-device.Lenovo:management-module': 'lenovo-switch',
@@ -150,7 +147,6 @@ servicebyname = {
     'lenovo-smm2': 'service:lenovo-smm2',
     'affluent-switch': 'affluent-switch',
     'lenovo-xcc': 'lenovo-xcc',
-    'megarac-bmc': 'megarac-bmc',
     'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
     'lenovo-switch': 'service:io-device.Lenovo:management-module',
     'thinkagile-storage': 'service:thinkagile-storagebmc',
@@ -457,7 +453,7 @@ def iterate_addrs(addrs, countonly=False):
         yield 1
         return
     yield addrs
-
+

 def _parameterize_path(pathcomponents):
     listrequested = False
     childcoll = True
@@ -546,7 +542,7 @@ def handle_api_request(configmanager, inputdata, operation, pathcomponents):
         if len(pathcomponents) > 2:
             raise Exception('TODO')
         currsubs = get_subscriptions()
-        return [msg.ChildCollection(x) for x in currsubs]
+        return [msg.ChildCollection(x) for x in currsubs]
     elif operation == 'retrieve':
         return handle_read_api_request(pathcomponents)
     elif (operation in ('update', 'create') and
@@ -1707,4 +1703,3 @@ if __name__ == '__main__':
     start_detection()
     while True:
         eventlet.sleep(30)
-
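The nodehandlers, servicenames, and servicebyname tables above are how a discovered SSDP/SLP service string gets tied to a handler module. A minimal sketch of how such a table can be used follows; it is illustrative only, not confluent's actual detection loop, and dispatch, detected_service, detected_info, and configmanager are hypothetical names:

def dispatch(nodehandlers, detected_service, detected_info, configmanager):
    # Look up the module registered for the advertised service string.
    handler_module = nodehandlers.get(detected_service)
    if handler_module is None:
        # Unknown service, or one deliberately mapped to None
        # (e.g. 'onie-switch' in the table above).
        return None
    # Each handler module exposes a NodeHandler(info, configmanager) class,
    # as the deleted megarac/redfishbmc handlers below illustrate.
    return handler_module.NodeHandler(detected_info, configmanager)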
@@ -1,51 +0,0 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import confluent.discovery.handlers.redfishbmc as redfishbmc
import eventlet.support.greendns


getaddrinfo = eventlet.support.greendns.getaddrinfo


class NodeHandler(redfishbmc.NodeHandler):

    def get_firmware_default_account_info(self):
        return ('admin', 'admin')


def remote_nodecfg(nodename, cfm):
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    ipaddr = ipaddr.split('/', 1)[0]
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)


if __name__ == '__main__':
    import confluent.config.configmanager as cfm
    c = cfm.ConfigManager(None)
    import sys
    info = {'addresses': [[sys.argv[1]]]}
    print(repr(info))
    testr = NodeHandler(info, c)
    testr.config(sys.argv[2])
@@ -1,269 +0,0 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import confluent.discovery.handlers.generic as generic
import confluent.exceptions as exc
import confluent.netutil as netutil
import confluent.util as util
import eventlet
import eventlet.support.greendns
import json
try:
    from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode

getaddrinfo = eventlet.support.greendns.getaddrinfo

webclient = eventlet.import_patched('pyghmi.util.webclient')


def get_host_interface_urls(wc, mginfo):
    returls = []
    hifurl = mginfo.get('HostInterfaces', {}).get('@odata.id', None)
    if not hifurl:
        return None
    hifinfo = wc.grab_json_response(hifurl)
    hifurls = hifinfo.get('Members', [])
    for hifurl in hifurls:
        hifurl = hifurl['@odata.id']
        hifinfo = wc.grab_json_response(hifurl)
        acturl = hifinfo.get('ManagerEthernetInterface', {}).get('@odata.id', None)
        if acturl:
            returls.append(acturl)
    return returls


class NodeHandler(generic.NodeHandler):
    devname = 'BMC'

    def __init__(self, info, configmanager):
        self.trieddefault = None
        self.targuser = None
        self.curruser = None
        self.currpass = None
        self.targpass = None
        self.nodename = None
        self.csrftok = None
        self.channel = None
        self.atdefault = True
        super(NodeHandler, self).__init__(info, configmanager)

    def get_firmware_default_account_info(self):
        raise Exception('This must be subclassed')

    def scan(self):
        c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
        i = c.grab_json_response('/redfish/v1/')
        uuid = i.get('UUID', None)
        if uuid:
            self.info['uuid'] = uuid.lower()

    def validate_cert(self, certificate):
        # broadly speaking, merely checks consistency moment to moment,
        # but if https_cert gets stricter, this check means something
        fprint = util.get_fingerprint(self.https_cert)
        return util.cert_matches(fprint, certificate)

    def _get_wc(self):
        defuser, defpass = self.get_firmware_default_account_info()
        wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
        wc.set_basic_credentials(defuser, defpass)
        wc.set_header('Content-Type', 'application/json')
        authmode = 0
        if not self.trieddefault:
            rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            if status == 403:
                self.trieddefault = True
                chgurl = None
                rsp = json.loads(rsp)
                currerr = rsp.get('error', {})
                ecode = currerr.get('code', None)
                if ecode.endswith('PasswordChangeRequired'):
                    for einfo in currerr.get('@Message.ExtendedInfo', []):
                        if einfo.get('MessageId', None).endswith('PasswordChangeRequired'):
                            for msgarg in einfo.get('MessageArgs'):
                                chgurl = msgarg
                                break
                if chgurl:
                    if self.targpass == defpass:
                        raise Exception("Must specify a non-default password to onboard this BMC")
                    wc.set_header('If-Match', '*')
                    cpr = wc.grab_json_response_with_status(chgurl, {'Password': self.targpass}, method='PATCH')
                    if cpr[1] >= 200 and cpr[1] < 300:
                        self.curruser = defuser
                        self.currpass = self.targpass
                        wc.set_basic_credentials(self.curruser, self.currpass)
                        _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                        tries = 10
                        while status >= 300 and tries:
                            eventlet.sleep(1)
                            _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                        return wc

            if status > 400:
                self.trieddefault = True
                if status == 401:
                    wc.set_basic_credentials(self.DEFAULT_USER, self.targpass)
                    rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                    if status == 200:  # Default user still, but targpass
                        self.currpass = self.targpass
                        self.curruser = defuser
                        return wc
                    elif self.targuser != defuser:
                        wc.set_basic_credentials(self.targuser, self.targpass)
                        rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                    if status != 200:
                        raise Exception("Target BMC does not recognize firmware default credentials nor the confluent stored credential")
            else:
                self.curruser = defuser
                self.currpass = defpass
                return wc
        if self.curruser:
            wc.set_basic_credentials(self.curruser, self.currpass)
            rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            if status != 200:
                return None
            return wc
        wc.set_basic_credentials(self.targuser, self.targpass)
        rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
        if status != 200:
            return None
        self.curruser = self.targuser
        self.currpass = self.targpass
        return wc

    def config(self, nodename):
        self.nodename = nodename
        creds = self.configmanager.get_node_attributes(
            nodename, ['secret.hardwaremanagementuser',
                       'secret.hardwaremanagementpassword',
                       'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'],
            True)
        cd = creds.get(nodename, {})
        defuser, defpass = self.get_firmware_default_account_info()
        user, passwd, _ = self.get_node_credentials(
            nodename, creds, defuser, defpass)
        user = util.stringify(user)
        passwd = util.stringify(passwd)
        self.targuser = user
        self.targpass = passwd
        wc = self._get_wc()
        srvroot, status = wc.grab_json_response_with_status('/redfish/v1/')
        curruserinfo = {}
        authupdate = {}
        wc.set_header('Content-Type', 'application/json')
        if user != self.curruser:
            authupdate['UserName'] = user
        if passwd != self.currpass:
            authupdate['Password'] = passwd
        if authupdate:
            targaccturl = None
            asrv = srvroot.get('AccountService', {}).get('@odata.id')
            rsp, status = wc.grab_json_response_with_status(asrv)
            accts = rsp.get('Accounts', {}).get('@odata.id')
            rsp, status = wc.grab_json_response_with_status(accts)
            accts = rsp.get('Members', [])
            for accturl in accts:
                accturl = accturl.get('@odata.id', '')
                if accturl:
                    rsp, status = wc.grab_json_response_with_status(accturl)
                    if rsp.get('UserName', None) == self.curruser:
                        targaccturl = accturl
                        break
            else:
                raise Exception("Unable to identify Account URL to modify on this BMC")
            rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH')
            if status >= 300:
                raise Exception("Failed attempting to update credentials on BMC")
            wc.set_basic_credentials(user, passwd)
            _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            tries = 10
            while tries and status >= 300:
                tries -= 1
                eventlet.sleep(1.0)
                _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
        if ('hardwaremanagement.manager' in cd and
                cd['hardwaremanagement.manager']['value'] and
                not cd['hardwaremanagement.manager']['value'].startswith(
                    'fe80::')):
            newip = cd['hardwaremanagement.manager']['value']
            newip = newip.split('/', 1)[0]
            newipinfo = getaddrinfo(newip, 0)[0]
            newip = newipinfo[-1][0]
            if ':' in newip:
                raise exc.NotImplementedException('IPv6 remote config TODO')
            mgrs = srvroot['Managers']['@odata.id']
            rsp = wc.grab_json_response(mgrs)
            if len(rsp['Members']) != 1:
                raise Exception("Can not handle multiple Managers")
            mgrurl = rsp['Members'][0]['@odata.id']
            mginfo = wc.grab_json_response(mgrurl)
            hifurls = get_host_interface_urls(wc, mginfo)
            mgtnicinfo = mginfo['EthernetInterfaces']['@odata.id']
            mgtnicinfo = wc.grab_json_response(mgtnicinfo)
            mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])]
            actualnics = []
            for candnic in mgtnics:
                if candnic in hifurls:
                    continue
                actualnics.append(candnic)
            if len(actualnics) != 1:
                raise Exception("Multi-interface BMCs are not supported currently")
            currnet = wc.grab_json_response(actualnics[0])
            netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=newip)
            newconfig = {
                "Address": newip,
                "SubnetMask": netutil.cidr_to_mask(netconfig['prefix']),
            }
            newgw = netconfig['ipv4_gateway']
            if newgw:
                newconfig['Gateway'] = newgw
            else:
                newconfig['Gateway'] = newip  # required property, set to self just to have a value
            for net in currnet.get("IPv4Addresses", []):
                if net["Address"] == newip and net["SubnetMask"] == newconfig['SubnetMask'] and (not newgw or newconfig['Gateway'] == newgw):
                    break
            else:
                wc.set_header('If-Match', '*')
                rsp, status = wc.grab_json_response_with_status(actualnics[0], {'IPv4StaticAddresses': [newconfig]}, method='PATCH')
        elif self.ipaddr.startswith('fe80::'):
            self.configmanager.set_node_attributes(
                {nodename: {'hardwaremanagement.manager': self.ipaddr}})
        else:
            raise exc.TargetEndpointUnreachable(
                'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)')


def remote_nodecfg(nodename, cfm):
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    ipaddr = ipaddr.split('/', 1)[0]
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)


if __name__ == '__main__':
    import confluent.config.configmanager as cfm
    c = cfm.ConfigManager(None)
    import sys
    info = {'addresses': [[sys.argv[1]]]}
    print(repr(info))
    testr = NodeHandler(info, c)
    testr.config(sys.argv[2])
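One small detail in config() above: confluent stores the node's network as an address plus prefix length, while the Redfish IPv4StaticAddresses payload wants a dotted-quad SubnetMask, hence the netutil.cidr_to_mask() call. A minimal sketch of that conversion follows; it is an illustrative stand-in, and confluent's own helper may differ in detail:

import socket
import struct


def cidr_to_mask(prefix):
    # 24 -> '255.255.255.0': set the top `prefix` bits of a 32-bit word.
    mask = (0xffffffff << (32 - prefix)) & 0xffffffff
    return socket.inet_ntoa(struct.pack('!I', mask))


# Hypothetical payload of the kind PATCHed to the BMC's EthernetInterface:
newconfig = {
    'Address': '192.0.2.50',
    'SubnetMask': cidr_to_mask(24),   # '255.255.255.0'
    'Gateway': '192.0.2.1',
}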
@@ -60,7 +60,6 @@ def active_scan(handler, protocol=None):
     known_peers = set([])
     for scanned in scan(['urn:dmtf-org:service:redfish-rest:1', 'urn::service:affluent']):
         for addr in scanned['addresses']:
-            addr = addr[0:1] + addr[2:]
             if addr in known_peers:
                 break
             hwaddr = neighutil.get_hwaddr(addr[0])
@@ -80,20 +79,13 @@ def scan(services, target=None):


 def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler):
-    if mac in peerbymacaddress:
-        normpeer = peer[0:1] + peer[2:]
-        for currpeer in peerbymacaddress[mac]['addresses']:
-            currnormpeer = currpeer[0:1] + peer[2:]
-            if currnormpeer == normpeer:
-                break
-        else:
-            peerbymacaddress[mac]['addresses'].append(peer)
+    if mac in peerbymacaddress and peer not in peerbymacaddress[mac]['addresses']:
+        peerbymacaddress[mac]['addresses'].append(peer)
     else:
         peerdata = {
             'hwaddr': mac,
             'addresses': [peer],
         }
-    targurl = None
     for headline in rsp[1:]:
         if not headline:
             continue
@@ -113,20 +105,13 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha
             if not value.endswith('/redfish/v1/'):
                 return
         elif header == 'LOCATION':
-            if '/eth' in value and value.endswith('.xml'):
-                targurl = '/redfish/v1/'
-                targtype = 'megarac-bmc'
-                continue # MegaRAC redfish
-            elif value.endswith('/DeviceDescription.json'):
-                targurl = '/DeviceDescription.json'
-                targtype = 'megarac-bmc'
-            else:
+            if not value.endswith('/DeviceDescription.json'):
                 return
-    if handler and targurl:
-        eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype)
+    if handler:
+        eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer)

-def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype):
-    retdata = check_fish((targurl, peerdata, targtype))
+def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer):
+    retdata = check_fish(('/DeviceDescription.json', peerdata))
     if retdata:
         known_peers.add(peer)
         newmacs.add(mac)
@@ -337,7 +322,7 @@ def _find_service(service, target):
                 host = '[{0}]'.format(host)
             msg = smsg.format(host, service)
             if not isinstance(msg, bytes):
-                msg = msg.encode('utf8')
+                msg = msg.encode('utf8')
             net6.sendto(msg, addr[4])
         else:
             net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
@@ -425,11 +410,7 @@ def _find_service(service, target):
             if '/redfish/v1/' not in peerdata[nid].get('urls', ()) and '/redfish/v1' not in peerdata[nid].get('urls', ()):
                 continue
             if '/DeviceDescription.json' in peerdata[nid]['urls']:
-                pooltargs.append(('/DeviceDescription.json', peerdata[nid], 'lenovo-xcc'))
-            else:
-                for targurl in peerdata[nid]['urls']:
-                    if '/eth' in targurl and targurl.endswith('.xml'):
-                        pooltargs.append(('/redfish/v1/', peerdata[nid], 'megarac-bmc'))
+                pooltargs.append(('/DeviceDescription.json', peerdata[nid]))
             # For now, don't interrogate generic redfish bmcs
             # This is due to a need to deduplicate from some supported SLP
             # targets (IMM, TSM, others)
@@ -444,7 +425,7 @@ def _find_service(service, target):
 def check_fish(urldata, port=443, verifycallback=None):
     if not verifycallback:
         verifycallback = lambda x: True
-    url, data, targtype = urldata
+    url, data = urldata
     try:
         wc = webclient.SecureHTTPConnection(_get_svrip(data), port, verifycallback=verifycallback, timeout=1.5)
         peerinfo = wc.grab_json_response(url)
@@ -466,7 +447,7 @@ def check_fish(urldata, port=443, verifycallback=None):
         peerinfo = wc.grab_json_response('/redfish/v1/')
     if url == '/redfish/v1/':
         if 'UUID' in peerinfo:
-            data['services'] = [targtype]
+            data['services'] = ['service:redfish-bmc']
             data['uuid'] = peerinfo['UUID'].lower()
             return data
     return None
@@ -485,12 +466,7 @@ def _parse_ssdp(peer, rsp, peerdata):
     if code == b'200':
         if nid in peerdata:
             peerdatum = peerdata[nid]
-            normpeer = peer[0:1] + peer[2:]
-            for currpeer in peerdatum['addresses']:
-                currnormpeer = currpeer[0:1] + peer[2:]
-                if currnormpeer == normpeer:
-                    break
-            else:
+            if peer not in peerdatum['addresses']:
                 peerdatum['addresses'].append(peer)
         else:
             peerdatum = {
@@ -525,7 +501,5 @@ def _parse_ssdp(peer, rsp, peerdata):

 if __name__ == '__main__':
     def printit(rsp):
-        pass # print(repr(rsp))
+        print(repr(rsp))
     active_scan(printit)
-
-
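The addr[0:1] + addr[2:] slicing that appears in several of the removed lines above is worth a note: the peer addresses are socket address tuples as returned for IPv6 sockets (host, port, flowinfo, scope_id), so dropping element 1 discards the source port, letting the same BMC answering from different ephemeral ports be treated as one peer. A small illustration of that reading, using made-up tuples:

# Two SSDP replies from the same IPv6 peer, differing only in source port.
peer_a = ('fe80::9abc:deff:fe12:3456', 50000, 0, 2)
peer_b = ('fe80::9abc:deff:fe12:3456', 51234, 0, 2)

def normalize(addr):
    # Drop element 1 (the port), keep address, flowinfo and scope id.
    return addr[0:1] + addr[2:]

assert normalize(peer_a) == normalize(peer_b) == ('fe80::9abc:deff:fe12:3456', 0, 2)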