
Add reporting of skipped nodes in a 'skip' merge

Jarrod Johnson 2024-08-14 11:40:11 -04:00
parent 29d0e90487
commit 28b88bdb12
2 changed files with 22 additions and 10 deletions

View File

@@ -1,7 +1,7 @@
-#!/usr/bin/python2
+#!/usr/bin/python3
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2017 Lenovo
+# Copyright 2017,2024 Lenovo
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -72,9 +72,19 @@ if args[0] in ('restore', 'merge'):
     stateless = args[0] == 'restore'
     cfm.init(stateless)
     cfm.statelessmode = stateless
+    skipped = {'nodes': [], 'nodegroups': []}
     cfm.restore_db_from_directory(
         dumpdir, password,
-        merge="skip" if args[0] == 'merge' else False)
+        merge="skip" if args[0] == 'merge' else False, skipped=skipped)
+    if skipped['nodes']:
+        skippedn = ','.join(skipped['nodes'])
+        print('The following nodes were skipped during merge: '
+              '{}'.format(skippedn))
+    if skipped['nodegroups']:
+        skippedn = ','.join(skipped['nodegroups'])
+        print('The following node groups were skipped during merge: '
+              '{}'.format(skippedn))
     cfm.statelessmode = False
     cfm.ConfigManager.wait_for_sync(True)
     if owner != 0:
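With this change, the command-line restore/merge path (the first file above) reports which entries a 'skip' merge left untouched, presumably because they already existed in the running configuration. Assuming nodes n1 and n2 and a group named compute were already defined (hypothetical names, for illustration only), the output would look like:

The following nodes were skipped during merge: n1,n2
The following node groups were skipped during merge: compute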

View File

@@ -2519,19 +2519,21 @@ class ConfigManager(object):
         self._bg_sync_to_file()
         #TODO: wait for synchronization to suceed/fail??)
-    def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None):
+    def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None, skipped=None):
         self.inrestore = True
         try:
-            self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata)
+            self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata, skipped=skipped)
         finally:
             self.inrestore = False
-    def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None):
+    def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None, skipped=None):
         """Load fresh configuration data from jsondata
         :param jsondata: String of jsondata
         :return:
         """
+        if not skipped:
+            skipped = {'nodes': None, 'nodegroups': None}
         dumpdata = json.loads(jsondata)
         tmpconfig = {}
         for confarea in _config_areas:
@@ -2588,9 +2590,9 @@ class ConfigManager(object):
             if confarea not in tmpconfig:
                 continue
             if confarea == 'nodes':
-                self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata)
+                self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodes'])
             elif confarea == 'nodegroups':
-                self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata)
+                self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodegroups'])
             elif confarea == 'usergroups':
                 if merge:
                     continue
@@ -2934,7 +2936,7 @@ def _dump_keys(password, dojson=True):
     return keydata
-def restore_db_from_directory(location, password, merge=False):
+def restore_db_from_directory(location, password, merge=False, skipped=None):
     kdd = None
     try:
         with open(os.path.join(location, 'keys.json'), 'r') as cfgfile:
@@ -2973,7 +2975,7 @@ def restore_db_from_directory(location, password, merge=False):
             raise
     with open(os.path.join(location, 'main.json'), 'r') as cfgfile:
        cfgdata = cfgfile.read()
-    ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd)
+    ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd, skipped=skipped)
     ConfigManager.wait_for_sync(True)
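The second file threads the collector down to set_node_attributes and set_group_attributes via skipped['nodes'] and skipped['nodegroups'], but the code that actually appends the skipped names is not part of this diff. Presumably those setters record any name they decline to overwrite while merging. A minimal standalone sketch of that assumed behavior, using a hypothetical apply_attributes helper rather than the real ConfigManager methods:

# Hypothetical illustration of the collector pattern; not actual confluent code.
def apply_attributes(store, attribmap, merge=False, skipped=None):
    for name, attrs in attribmap.items():
        if merge == 'skip' and name in store:
            if skipped is not None:   # None means the caller did not ask for a report
                skipped.append(name)  # record the entry that was left untouched
            continue
        store.setdefault(name, {}).update(attrs)

store = {'n1': {'groups': 'compute'}}
skipped_nodes = []
apply_attributes(store, {'n1': {'groups': 'storage'}, 'n2': {}},
                 merge='skip', skipped=skipped_nodes)
print(','.join(skipped_nodes))  # n1 already existed, so this prints: n1

The {'nodes': None, 'nodegroups': None} default added to _load_from_json_backend fits the same pattern: when no collector is supplied, the setters receive skipped=None and simply record nothing.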